// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

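/* IRQ types the host can request through the irq_type register */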
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

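/* Commands the host writes to the command register to start a test */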
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

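/* Status bits reported back to the host through the status register */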
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

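/* Flags the host can set in the flags register to control a transfer */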
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

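/* Capabilities advertised to the host through the caps register */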
#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

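/*
 * Register layout exposed to the host through the test register BAR.
 * All fields are little-endian from the host's point of view.
 */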
struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;
	__le32 status;
	__le64 src_addr;
	__le64 dst_addr;
	__le32 size;
	__le32 checksum;
	__le32 irq_type;
	__le32 irq_number;
	__le32 flags;
	__le32 caps;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

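/* Default sizes for BAR0-BAR5; the test register BAR is sized separately */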
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

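	/*
	 * Complete only on terminal states so that the waiter in
	 * pci_epf_test_data_transfer() is not woken up by intermediate
	 * status updates.
	 */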
	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses the dmaengine API to
 *				  transfer data between the PCIe EP and a
 *				  remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or the DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or the DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses the dmaengine API to transfer data between the PCIe EP
 * and a remote PCIe RC. The source and destination addresses can be physical
 * addresses given by pci_epc_mem_alloc_addr() or ones obtained using the DMA
 * mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

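	/*
	 * Private slave channels need the remote PCI address set through a
	 * slave config, while generic memcpy channels take the source and
	 * destination addresses directly in the descriptor.
	 */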
	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

202 
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize the EPF test DMA
 *				  channels
 * @epf_test: the EPF test device that performs the data transfer operation
 *
 * Function to initialize the EPF test DMA channels.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

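	/*
	 * First try dedicated RX/TX slave channels on the EPC's own DMA
	 * controller, falling back to any memcpy-capable channel below.
	 */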
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to clean up the EPF test DMA
 *				   channels
 * @epf_test: the EPF test device that performs the data transfer operation
 *
 * Helper to clean up the EPF test DMA channels.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* Calculate the rate in KB/s */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

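	/*
	 * The EPC may map only part of the requested range at a time, so
	 * loop until the whole range has been copied.
	 */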
	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

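	/* Verify the received data against the checksum written by the host. */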
	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
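	/*
	 * Fill the buffer with random data and publish its CRC32 so the
	 * host can verify what it receives.
	 */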
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1 ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

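	/* Re-queue ourselves to poll for the next command from the host. */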
reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			epf_test->reg[bar] = NULL;
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	if (epf_test->epc_features->msi_capable)
		caps |= CAP_MSI;

	if (epf_test->epc_features->msix_capable)
		caps |= CAP_MSIX;

	if (epf_test->epc_features->intx_capable)
		caps |= CAP_INTX;

	reg->caps = cpu_to_le32(caps);
}

static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

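	/*
	 * Without a link-up notifier there is no event to wait for, so start
	 * handling commands right away.
	 */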
	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

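	/*
	 * If MSI-X is supported, the MSI-X table and PBA are placed in the
	 * test register BAR, right after the registers themselves.
	 */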
	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
		epf_test->reg[bar] = NULL;
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");