// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

static int copy_dma_range_map(struct device *to, struct device *from)
{
	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
	int num_ranges = 0;

	if (!map)
		return 0;

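	/*
	 * Note (added): the dma_range_map is an array terminated by an entry
	 * whose size is zero; count the populated entries so that kmemdup()
	 * below also copies the terminator.
	 */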
	for (r = map; r->size; r++)
		num_ranges++;

	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
			  GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;
	to->dma_range_map = new_map;
	return 0;
}

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	struct platform_device *pdev;

	pdev = container_of(vdev->dev.parent, struct platform_device, dev);

	return platform_get_drvdata(pdev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}
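
/*
 * Example (hypothetical): the platform driver supplies the .kick op used
 * above, typically by signalling the remote side through a mailbox. The
 * my_rproc names below are illustrative and not part of this file:
 *
 *	static void my_rproc_kick(struct rproc *rproc, int vqid)
 *	{
 *		struct my_rproc *priv = rproc->priv;
 *
 *		mbox_send_message(priv->mbox_chan, &vqid);
 *	}
 */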

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
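
/*
 * Example (hypothetical): a platform rproc driver would typically call
 * rproc_vq_interrupt() from its mailbox rx callback (or IRQ handler),
 * treating the received payload as the notify ID. A minimal sketch,
 * assuming a driver-private struct my_rproc wrapping a mbox_client:
 *
 *	static void my_rproc_mbox_rx(struct mbox_client *cl, void *data)
 *	{
 *		struct my_rproc *priv = container_of(cl, struct my_rproc, cl);
 *		u32 notifyid = *(u32 *)data;
 *
 *		if (rproc_vq_interrupt(priv->rproc, notifyid) == IRQ_NONE)
 *			dev_dbg(cl->dev, "no message in vq %u\n", notifyid);
 *	}
 */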

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}
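
/*
 * Example (hypothetical): virtio drivers on top of this transport reach
 * rproc_virtio_find_vqs() through the generic virtio_find_vqs() helper.
 * A sketch in the style of virtio_rpmsg_bus, with recv_done/xmit_done
 * standing in for the driver's callbacks:
 *
 *	vq_callback_t *cbs[] = { recv_done, xmit_done };
 *	static const char * const names[] = { "input", "output" };
 *	struct virtqueue *vqs[2];
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */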

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset!\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	/* the config space immediately follows the vring descriptors */
	cfg = &rsc->vring[rsc->num_of_vrings];

	/* the second test guards against integer overflow of offset + len */
	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount which was taken when
 * vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	kfree(vdev);

	of_reserved_mem_device_release(&rvdev->pdev->dev);
	dma_release_coherent_memory(&rvdev->pdev->dev);

	put_device(&rvdev->pdev->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->pdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use the dma address since the carveout is not mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							  mem->da,
							  mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device	= id;
	vdev->config = &rproc_virtio_config_ops;
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/* Reference the vdev and vring allocations */
	get_device(dev);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	struct device *dev = &rvdev->pdev->dev;
	int ret;

	ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}

static int rproc_virtio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc_vdev_data *rvdev_data = dev->platform_data;
	struct rproc_vdev *rvdev;
	struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
	struct fw_rsc_vdev *rsc;
	int i, ret;

	if (!rvdev_data)
		return -EINVAL;

	rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->id = rvdev_data->id;
	rvdev->rproc = rproc;
	rvdev->index = rvdev_data->index;

	ret = copy_dma_range_map(dev, rproc->dev.parent);
	if (ret)
		return ret;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	platform_set_drvdata(pdev, rvdev);
	rvdev->pdev = pdev;

	rsc = rvdev_data->rsc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			return ret;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = rvdev_data->rsc_offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	rproc_add_rvdev(rproc, rvdev);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because the platform device or the vdev device will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ on platform remove.
	 */
	get_device(&rproc->dev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);

	return ret;
}

static void rproc_virtio_remove(struct platform_device *pdev)
{
	struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
	struct rproc *rproc = rvdev->rproc;
	struct rproc_vring *rvring;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	rproc_remove_rvdev(rvdev);

	put_device(&rproc->dev);
}

/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
	.probe		= rproc_virtio_probe,
	.remove_new	= rproc_virtio_remove,
	.driver		= {
		.name	= "rproc-virtio",
	},
};
builtin_platform_driver(rproc_virtio_driver);
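
/*
 * A sketch of how this driver gets bound (assumption: mirrors what
 * remoteproc_core does when handling a vdev resource). The core registers
 * a "rproc-virtio" platform device whose platform data carries the
 * rproc_vdev_data consumed by rproc_virtio_probe() above:
 *
 *	struct rproc_vdev_data rvdev_data = {
 *		.id = rsc->id,
 *		.index = index,
 *		.rsc_offset = offset,
 *		.rsc = rsc,
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_data(&rproc->dev, "rproc-virtio",
 *					     rvdev_data.index, &rvdev_data,
 *					     sizeof(rvdev_data));
 */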