// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver for PCI endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan;
	struct completion	transfer_complete;
	bool			dma_supported;
	const struct pci_epc_features *epc_features;
};

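/*
 * Register layout of the test device, exposed through the test BAR and
 * accessed by the host-side pci_endpoint_test driver to drive each test.
 */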
struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

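/* dmaengine completion callback: wake up the thread waiting on the transfer */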
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *chan = epf_test->dma_chan;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct device *dev = &epf->dev;
	dma_cookie_t cookie;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	cookie = tx->tx_submit(tx);
	reinit_completion(&epf_test->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dmaengine_terminate_sync(chan);
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs the data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	dma_release_channel(epf_test->dma_chan);
	epf_test->dma_chan = NULL;
}

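/*
 * Compute and log the throughput of a completed transfer, along with its
 * size, the elapsed time and whether DMA was used.
 */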
static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* scale size by NSEC_PER_SEC (into 'rate') and convert time to 'ns' */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/* Divide both size (stored in 'rate') and ns by a common factor */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return;

	/* calculate the rate */
	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}

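/*
 * COMMAND_COPY handler: map the host source and destination buffers and copy
 * reg->size bytes between them, using either the DMA engine or memcpy().
 */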
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		memcpy(dst_addr, src_addr, reg->size);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}

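/*
 * COMMAND_READ handler: read reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer and verify its CRC32 against
 * reg->checksum.
 */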
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}

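/*
 * COMMAND_WRITE handler: fill a local buffer with random data, publish its
 * CRC32 in reg->checksum and write it to the host buffer at reg->dst_addr.
 */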
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * wait 1ms in order for the write to complete. Without this delay, an
	 * L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}

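/* Raise a legacy, MSI or MSI-X interrupt to signal command completion */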
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
				   u16 irq)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	reg->status |= STATUS_IRQ_RAISED;

	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		break;
	case IRQ_TYPE_MSI:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
		break;
	case IRQ_TYPE_MSIX:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

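/*
 * Work function that polls the command register, dispatches the requested
 * test operation and re-queues itself every millisecond.
 */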
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

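/*
 * Undo bind(): stop the command handler, release the DMA channel, and clear
 * and free all BARs.
 */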
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epc_stop(epc);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];

		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
		}
	}
}

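/*
 * Program each allocated BAR into the endpoint controller. Only a failure
 * on the test register BAR is treated as fatal.
 */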
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

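/*
 * Initialize the PCI function: write the configuration space header, program
 * the BARs and configure MSI/MSI-X according to the controller's features.
 */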
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}

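/* Notifier callback for controller events: CORE_INIT and LINK_UP */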
static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int ret;

	switch (val) {
	case CORE_INIT:
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return NOTIFY_BAD;
		break;

	case LINK_UP:
		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
				   msecs_to_jiffies(1));
		break;

	default:
		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
		return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}

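/*
 * Allocate backing memory for each usable BAR. The test register BAR also
 * hosts the MSI-X table and PBA when the controller is MSI-X capable.
 */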
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

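/* Apply controller-imposed fixed 64-bit flags and fixed sizes to the BARs */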
static void pci_epf_configure_bar(struct pci_epf *epf,
				  const struct pci_epc_features *epc_features)
{
	struct pci_epf_bar *epf_bar;
	bool bar_fixed_64bit;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		epf_bar = &epf->bar[i];
		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
		if (bar_fixed_64bit)
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
		if (epc_features->bar_fixed_size[i])
			bar_size[i] = epc_features->bar_fixed_size[i];
	}
}

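/*
 * Bind the function to a controller: query its features, allocate BAR space,
 * set up DMA and start the command handler (on link-up when notified).
 */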
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		linkup_notifier = epc_features->linkup_notifier;
		core_init_notifier = epc_features->core_init_notifier;
		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
		pci_epf_configure_bar(epf, epc_features);
	}

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (linkup_notifier) {
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

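/* Allocate the per-function state and set up the command handler work */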
static int pci_epf_test_probe(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");