// SPDX-License-Identifier: GPL-2.0
/*
 * Endpoint Function Driver to implement Non-Transparent Bridge functionality
 * between PCI RC and EP
 *
 * Copyright (C) 2020 Texas Instruments
 * Copyright (C) 2022 NXP
 *
 * Based on pci-epf-ntb.c
 * Author: Frank Li <Frank.Li@nxp.com>
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

/*
 * +------------+         +---------------------------------------+
 * |            |         |                                       |
 * +------------+         |                        +--------------+
 * | NTB        |         |                        | NTB          |
 * | NetDev     |         |                        | NetDev       |
 * +------------+         |                        +--------------+
 * | NTB        |         |                        | NTB          |
 * | Transfer   |         |                        | Transfer     |
 * +------------+         |                        +--------------+
 * |            |         |                        |              |
 * |  PCI NTB   |         |                        |              |
 * |    EPF     |         |                        |              |
 * |   Driver   |         |                        | PCI Virtual  |
 * |            |         +---------------+        | NTB Driver   |
 * |            |         | PCI EP NTB    |<------>|              |
 * |            |         |  FN Driver    |        |              |
 * +------------+         +---------------+        +--------------+
 * |            |         |               |        |              |
 * |  PCI Bus   | <-----> |  PCI EP Bus   |        |  Virtual PCI |
 * |            |  PCI    |               |        |     Bus      |
 * +------------+         +---------------+--------+--------------+
 * PCIe Root Port                        PCI EP
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/ntb.h>

static struct workqueue_struct *kpcintb_workqueue;

#define COMMAND_CONFIGURE_DOORBELL	1
#define COMMAND_TEARDOWN_DOORBELL	2
#define COMMAND_CONFIGURE_MW		3
#define COMMAND_TEARDOWN_MW		4
#define COMMAND_LINK_UP			5
#define COMMAND_LINK_DOWN		6

#define COMMAND_STATUS_OK		1
#define COMMAND_STATUS_ERROR		2

#define LINK_STATUS_UP			BIT(0)

#define SPAD_COUNT			64
#define DB_COUNT			4
#define NTB_MW_OFFSET			2
#define DB_COUNT_MASK			GENMASK(15, 0)
#define MSIX_ENABLE			BIT(16)
#define MAX_DB_COUNT			32
#define MAX_MW				4

enum epf_ntb_bar {
	BAR_CONFIG,
	BAR_DB,
	BAR_MW1,
	BAR_MW2,
	BAR_MW3,
	BAR_MW4,
	VNTB_BAR_NUM,
};

/*
 * +--------------------------------------------------+ Base
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * |          Common Control Register                 |
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          |
 * |    Peer Spad Space    |    Spad Space            |
 * |                       |                          |
 * |                       |                          |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          | +spad_count * 4
 * |                       |                          |
 * |    Spad Space         |   Peer Spad Space        |
 * |                       |                          |
 * +-----------------------+--------------------------+
 *       Virtual PCI             PCIe Endpoint
 *       NTB Driver               NTB Driver
 */
struct epf_ntb_ctrl {
	u32 command;
	u32 argument;
	u16 command_status;
	u16 link_status;
	u32 topology;
	u64 addr;
	u64 size;
	u32 num_mws;
	u32 reserved;
	u32 spad_offset;
	u32 spad_count;
	u32 db_entry_size;
	u32 db_data[MAX_DB_COUNT];
	u32 db_offset[MAX_DB_COUNT];
} __packed;

struct epf_ntb {
	struct ntb_dev ntb;
	struct pci_epf *epf;
	struct config_group group;

	u32 num_mws;
	u32 db_count;
	u32 spad_count;
	u64 mws_size[MAX_MW];
	u64 db;
	u32 vbus_number;
	u16 vntb_pid;
	u16 vntb_vid;

	bool linkup;
	u32 spad_size;

	enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];

	struct epf_ntb_ctrl *reg;

	u32 *epf_db;

	phys_addr_t vpci_mw_phy[MAX_MW];
	void __iomem *vpci_mw_addr[MAX_MW];

	struct delayed_work cmd_handler;
};

#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)

static struct pci_epf_header epf_ntb_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_BASE_CLASS_MEMORY,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/**
 * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @link_up: true or false indicating Link is UP or Down
 *
 * Once the NTB function on the HOST invokes ntb_link_enable(),
 * this NTB function driver will trigger a link event to the VHOST.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
	if (link_up)
		ntb->reg->link_status |= LINK_STATUS_UP;
	else
		ntb->reg->link_status &= ~LINK_STATUS_UP;

	ntb_link_event(&ntb->ntb);
	return 0;
}

/**
 * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
 *   to access the memory window of HOST
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 *                          EP Outbound Window
 * +--------+              +-----------+
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              +-----------+
 * | Virtual|              | Memory Win|
 * | NTB    | -----------> |           |
 * | Driver |              |           |
 * |        |              +-----------+
 * |        |              |           |
 * |        |              |           |
 * +--------+              +-----------+
 *  VHOST                   PCI EP
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
{
	phys_addr_t phys_addr;
	u8 func_no, vfunc_no;
	u64 addr, size;
	int ret = 0;

	phys_addr = ntb->vpci_mw_phy[mw];
	addr = ntb->reg->addr;
	size = ntb->reg->size;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
	if (ret)
		dev_err(&ntb->epf->epc->dev,
			"Failed to map memory window %d address\n", mw);
	return ret;
}

/**
 * epf_ntb_teardown_mw() - Teardown the configured OB ATU
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 * Teardown the OB ATU configured in epf_ntb_configure_mw() using
 * pci_epc_unmap_addr()
 */
static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
{
	pci_epc_unmap_addr(ntb->epf->epc,
			   ntb->epf->func_no,
			   ntb->epf->vfunc_no,
			   ntb->vpci_mw_phy[mw]);
}

/**
 * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
 * @work: work_struct of the epf_ntb device
 *
 * Workqueue function that gets invoked periodically (once every 5ms) to
 * check if it has received any commands from the NTB HOST. The HOST can
 * send commands to configure doorbell or configure memory window or to
 * update link status.
 */
static void epf_ntb_cmd_handler(struct work_struct *work)
{
	struct epf_ntb_ctrl *ctrl;
	u32 command, argument;
	struct epf_ntb *ntb;
	struct device *dev;
	int ret;
	int i;

	ntb = container_of(work, struct epf_ntb, cmd_handler.work);

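	/*
	 * Scan the local doorbell memory: the HOST rings doorbell 'i' by
	 * writing a non-zero value (db_data[i]) into the i-th slot of the
	 * doorbell BAR. Latch it into ntb->db, notify the VHOST NTB client
	 * via ntb_db_event() and clear the slot for the next ring.
	 */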
	for (i = 1; i < ntb->db_count; i++) {
		if (ntb->epf_db[i]) {
			ntb->db |= 1 << (i - 1);
			ntb_db_event(&ntb->ntb, i);
			ntb->epf_db[i] = 0;
		}
	}

	ctrl = ntb->reg;
	command = ctrl->command;
	if (!command)
		goto reset_handler;
	argument = ctrl->argument;

	ctrl->command = 0;
	ctrl->argument = 0;

	ctrl = ntb->reg;
	dev = &ntb->epf->dev;

	switch (command) {
	case COMMAND_CONFIGURE_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_CONFIGURE_MW:
		ret = epf_ntb_configure_mw(ntb, argument);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_MW:
		epf_ntb_teardown_mw(ntb, argument);
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_LINK_UP:
		ntb->linkup = true;
		ret = epf_ntb_link_up(ntb, true);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		goto reset_handler;
	case COMMAND_LINK_DOWN:
		ntb->linkup = false;
		ret = epf_ntb_link_up(ntb, false);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	default:
		dev_err(dev, "UNKNOWN command: %d\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
			   msecs_to_jiffies(5));
}

/**
 * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Clear the BAR which contains the VHOST's config and self scratchpad
 * region (removes inbound ATU configuration). While BAR0 is the default
 * self scratchpad BAR, an NTB could have other BARs for self scratchpad
 * (because of reserved BARs). This function gets the exact BAR used for
 * self scratchpad from epf_ntb_bar[BAR_CONFIG].
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note VHOST's peer
 * scratchpad is HOST's self scratchpad.
 *
 * Returns: void
 */
static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
}

/**
 * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Map BAR0 of EP CONTROLLER which contains the VHOST's config and
 * self scratchpad region.
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	u8 func_no, vfunc_no;
	struct device *dev;
	int ret;

	dev = &ntb->epf->dev;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;
	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Config/Status/SPAD BAR set failed\n");
		return ret;
	}
	return 0;
}

/**
 * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
 *   config + scratchpad region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
}

/**
 * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
 *   region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Allocate the Local Memory mentioned in the above diagram. The size of the
 * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and the size of the SCRATCHPAD
 * REGION is obtained from the "spad_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
	enum pci_barno barno;
	struct epf_ntb_ctrl *ctrl;
	u32 spad_size, ctrl_size;
	struct pci_epf *epf = ntb->epf;
	struct device *dev = &epf->dev;
	u32 spad_count;
	void *base;
	int i;
	const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
									    epf->func_no,
									    epf->vfunc_no);
	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	spad_count = ntb->spad_count;

	ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
	spad_size = 2 * spad_count * sizeof(u32);

	base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
				   barno, epc_features, 0);
	if (!base) {
		dev_err(dev, "Config/Status/SPAD alloc region fail\n");
		return -ENOMEM;
	}

	ntb->reg = base;

	ctrl = ntb->reg;
	ctrl->spad_offset = ctrl_size;

	ctrl->spad_count = spad_count;
	ctrl->num_mws = ntb->num_mws;
	ntb->spad_size = spad_size;

	ctrl->db_entry_size = sizeof(u32);

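	/*
	 * Advertise one doorbell register per doorbell: the HOST rings
	 * doorbell 'i' by writing db_data[i] (i + 1, so that the value is
	 * never zero) at db_offset[i] within the doorbell BAR.
	 */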
	for (i = 0; i < ntb->db_count; i++) {
		ntb->reg->db_data[i] = 1 + i;
		ntb->reg->db_offset[i] = 0;
	}

	return 0;
}

/**
 * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Configure MSI/MSI-X capability for each interface with number of
 * interrupts equal to "db_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	struct device *dev;
	u32 db_count;
	int ret;

	dev = &ntb->epf->dev;

	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	if (!(epc_features->msix_capable || epc_features->msi_capable)) {
		dev_err(dev, "MSI or MSI-X is required for doorbell\n");
		return -EINVAL;
	}

	db_count = ntb->db_count;
	if (db_count > MAX_DB_COUNT) {
		dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
		return -EINVAL;
	}

	ntb->db_count = db_count;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      16);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	return 0;
}

/**
 * epf_ntb_db_bar_init() - Configure Doorbell window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	struct device *dev = &ntb->epf->dev;
	int ret;
	struct pci_epf_bar *epf_bar;
	void *mw_addr;
	enum pci_barno barno;
	size_t size = sizeof(u32) * ntb->db_count;

	epc_features = pci_epc_get_features(ntb->epf->epc,
					    ntb->epf->func_no,
					    ntb->epf->vfunc_no);
	barno = ntb->epf_ntb_bar[BAR_DB];

	mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
	if (!mw_addr) {
		dev_err(dev, "Failed to allocate OB address\n");
		return -ENOMEM;
	}

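	/*
	 * The doorbell BAR is backed by plain local memory;
	 * epf_ntb_cmd_handler() polls these u32 slots to detect doorbells
	 * rung by the HOST.
	 */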
	ntb->epf_db = mw_addr;

	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Doorbell BAR set failed\n");
		goto err_alloc_peer_mem;
	}
	return ret;

err_alloc_peer_mem:
	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
	return ret;
}

static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);

/**
 * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
 *   allocated in peer's outbound address space
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_DB];
	pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
}

/**
 * epf_ntb_mw_bar_init() - Configure Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
{
	int ret = 0;
	int i;
	u64 size;
	enum pci_barno barno;
	struct device *dev = &ntb->epf->dev;

	for (i = 0; i < ntb->num_mws; i++) {
		size = ntb->mws_size[i];
		barno = ntb->epf_ntb_bar[BAR_MW1 + i];

		ntb->epf->bar[barno].barno = barno;
		ntb->epf->bar[barno].size = size;
		ntb->epf->bar[barno].addr = NULL;
		ntb->epf->bar[barno].phys_addr = 0;
		ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
				PCI_BASE_ADDRESS_MEM_TYPE_64 :
				PCI_BASE_ADDRESS_MEM_TYPE_32;

		ret = pci_epc_set_bar(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      &ntb->epf->bar[barno]);
		if (ret) {
			dev_err(dev, "MW set failed\n");
			goto err_alloc_mem;
		}

		/* Allocate EPC outbound memory windows to vpci vntb device */
		ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
							      &ntb->vpci_mw_phy[i],
							      size);
		if (!ntb->vpci_mw_addr[i]) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to allocate source address\n");
			goto err_set_bar;
		}
	}

	return ret;

err_set_bar:
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
err_alloc_mem:
	epf_ntb_mw_bar_clear(ntb, i);
	return ret;
}

/**
 * epf_ntb_mw_bar_clear() - Clear Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @num_mws: the number of Memory window BARs to be cleared
 */
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{
	enum pci_barno barno;
	int i;

	for (i = 0; i < num_mws; i++) {
		barno = ntb->epf_ntb_bar[BAR_MW1 + i];
		pci_epc_clear_bar(ntb->epf->epc,
				  ntb->epf->func_no,
				  ntb->epf->vfunc_no,
				  &ntb->epf->bar[barno]);

		pci_epc_mem_free_addr(ntb->epf->epc,
				      ntb->vpci_mw_phy[i],
				      ntb->vpci_mw_addr[i],
				      ntb->mws_size[i]);
	}
}

/**
 * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Remove the endpoint function from the EPC and release the EPC reference.
 */
static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
{
	pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
	pci_epc_put(ntb->epf->epc);
}

/**
 * epf_ntb_is_bar_used() - Check if a BAR is used in the NTB configuration
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @barno: Checked BAR number
 *
 * Returns: true if used, false if free.
 */
static bool epf_ntb_is_bar_used(struct epf_ntb *ntb,
				enum pci_barno barno)
{
	int i;

	for (i = 0; i < VNTB_BAR_NUM; i++) {
		if (ntb->epf_ntb_bar[i] == barno)
			return true;
	}

	return false;
}

/**
 * epf_ntb_find_bar() - Assign BAR number when no configuration is provided
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @epc_features: The features provided by the EPC specific to this EPF
 * @bar: NTB BAR index
 * @barno: BAR start index
 *
 * When the BAR configuration was not provided through the userspace
 * configuration, automatically assign BARs as it has been historically
 * done by this endpoint function.
 *
 * Returns: the BAR number found, if any. -1 otherwise
 */
static int epf_ntb_find_bar(struct epf_ntb *ntb,
			    const struct pci_epc_features *epc_features,
			    enum epf_ntb_bar bar,
			    enum pci_barno barno)
{
	while (ntb->epf_ntb_bar[bar] < 0) {
		barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0)
			break; /* No more BARs available */

		/*
		 * Verify that the BAR found is not already assigned
		 * through the provided configuration
		 */
		if (!epf_ntb_is_bar_used(ntb, barno))
			ntb->epf_ntb_bar[bar] = barno;

		barno += 1;
	}

	return barno;
}

/**
 * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
 *   constructs (scratchpad region, doorbell, memory window)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno barno;
	enum epf_ntb_bar bar;
	struct device *dev;
	u32 num_mws;
	int i;

	barno = BAR_0;
	num_mws = ntb->num_mws;
	dev = &ntb->epf->dev;
	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	/* These are required BARs which are mandatory for NTB functionality */
	for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) {
		barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
		if (barno < 0) {
			dev_err(dev, "Failed to get NTB function BAR\n");
			return -ENOENT;
		}
	}

	/* These are optional BARs which don't impact NTB functionality */
	for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) {
		barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
		if (barno < 0) {
			ntb->num_mws = i;
			dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
		}
	}

	return 0;
}

/**
 * epf_ntb_epc_init() - Initialize NTB interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to initialize a particular EPC interface and start the workqueue
 * to check for commands from HOST. This function will write to the
 * EP controller HW for configuring it.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
	u8 func_no, vfunc_no;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct device *dev;
	int ret;

	epf = ntb->epf;
	dev = &epf->dev;
	epc = epf->epc;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = epf_ntb_config_sspad_bar_set(ntb);
	if (ret) {
		dev_err(dev, "Config/self SPAD BAR init failed\n");
		return ret;
	}

	ret = epf_ntb_configure_interrupt(ntb);
	if (ret) {
		dev_err(dev, "Interrupt configuration failed\n");
		goto err_config_interrupt;
	}

	ret = epf_ntb_db_bar_init(ntb);
	if (ret) {
		dev_err(dev, "DB BAR init failed\n");
		goto err_db_bar_init;
	}

	ret = epf_ntb_mw_bar_init(ntb);
	if (ret) {
		dev_err(dev, "MW BAR init failed\n");
		goto err_mw_bar_init;
	}

	if (vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			goto err_write_header;
		}
	}

	INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
	queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);

	return 0;

err_write_header:
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
err_mw_bar_init:
	epf_ntb_db_bar_clear(ntb);
err_db_bar_init:
err_config_interrupt:
	epf_ntb_config_sspad_bar_clear(ntb);

	return ret;
}

/**
 * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to cleanup all NTB interfaces.
 */
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
	epf_ntb_db_bar_clear(ntb);
	epf_ntb_config_sspad_bar_clear(ntb);
}

#define EPF_NTB_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->_name);			\
}

#define EPF_NTB_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	ntb->_name = val;						\
									\
	return len;							\
}

#define EPF_NTB_MW_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]);	\
}

#define EPF_NTB_MW_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
	u64 val;							\
	int ret;							\
									\
	ret = kstrtou64(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	ntb->mws_size[win_no - 1] = val;				\
									\
	return len;							\
}

#define EPF_NTB_BAR_R(_name, _id)					\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]);		\
}

#define EPF_NTB_BAR_W(_name, _id)					\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	int val;							\
	int ret;							\
									\
	ret = kstrtoint(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (val < NO_BAR || val > BAR_5)				\
		return -EINVAL;						\
									\
	ntb->epf_ntb_bar[_id] = val;					\
									\
	return len;							\
}

static ssize_t epf_ntb_num_mws_store(struct config_item *item,
				     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct epf_ntb *ntb = to_epf_ntb(group);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret)
		return ret;

	if (val > MAX_MW)
		return -EINVAL;

	ntb->num_mws = val;

	return len;
}

EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_R(vbus_number)
EPF_NTB_W(vbus_number)
EPF_NTB_R(vntb_pid)
EPF_NTB_W(vntb_pid)
EPF_NTB_R(vntb_vid)
EPF_NTB_W(vntb_vid)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG)
EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG)
EPF_NTB_BAR_R(db_bar, BAR_DB)
EPF_NTB_BAR_W(db_bar, BAR_DB)
EPF_NTB_BAR_R(mw1_bar, BAR_MW1)
EPF_NTB_BAR_W(mw1_bar, BAR_MW1)
EPF_NTB_BAR_R(mw2_bar, BAR_MW2)
EPF_NTB_BAR_W(mw2_bar, BAR_MW2)
EPF_NTB_BAR_R(mw3_bar, BAR_MW3)
EPF_NTB_BAR_W(mw3_bar, BAR_MW3)
EPF_NTB_BAR_R(mw4_bar, BAR_MW4)
EPF_NTB_BAR_W(mw4_bar, BAR_MW4)

CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
CONFIGFS_ATTR(epf_ntb_, ctrl_bar);
CONFIGFS_ATTR(epf_ntb_, db_bar);
CONFIGFS_ATTR(epf_ntb_, mw1_bar);
CONFIGFS_ATTR(epf_ntb_, mw2_bar);
CONFIGFS_ATTR(epf_ntb_, mw3_bar);
CONFIGFS_ATTR(epf_ntb_, mw4_bar);

static struct configfs_attribute *epf_ntb_attrs[] = {
	&epf_ntb_attr_spad_count,
	&epf_ntb_attr_db_count,
	&epf_ntb_attr_num_mws,
	&epf_ntb_attr_mw1,
	&epf_ntb_attr_mw2,
	&epf_ntb_attr_mw3,
	&epf_ntb_attr_mw4,
	&epf_ntb_attr_vbus_number,
	&epf_ntb_attr_vntb_pid,
	&epf_ntb_attr_vntb_vid,
	&epf_ntb_attr_ctrl_bar,
	&epf_ntb_attr_db_bar,
	&epf_ntb_attr_mw1_bar,
	&epf_ntb_attr_mw2_bar,
	&epf_ntb_attr_mw3_bar,
	&epf_ntb_attr_mw4_bar,
	NULL,
};

static const struct config_item_type ntb_group_type = {
	.ct_attrs	= epf_ntb_attrs,
	.ct_owner	= THIS_MODULE,
};

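/*
 * Illustrative configfs usage (a sketch, not taken from this file): after a
 * function directory is created under the EPF configfs tree, e.g.
 * functions/pci_epf_vntb/func1, the attributes above appear in the
 * sub-directory created by epf_ntb_add_cfs() (named after the EPF device)
 * and could be set along these lines before linking the function to an EPC:
 *
 *	echo 2        > .../num_mws
 *	echo 0x100000 > .../mw1
 *	echo 0x100000 > .../mw2
 *	echo 1        > .../vbus_number
 *	echo 0x1957   > .../vntb_vid
 *	echo 0x080A   > .../vntb_pid
 *
 * The exact paths and ID values here are illustrative assumptions.
 */
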
/**
 * epf_ntb_add_cfs() - Add configfs directory specific to NTB
 * @epf: NTB endpoint function device
 * @group: A pointer to the config_group structure referencing a group of
 *	   config_items of a specific type that belong to a specific sub-system.
 *
 * Add configfs directory specific to NTB. This directory will hold
 * NTB specific properties like db_count, spad_count, num_mws etc.
 *
 * Returns: Pointer to config_group
 */
static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
					    struct config_group *group)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct config_group *ntb_group = &ntb->group;
	struct device *dev = &epf->dev;

	config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);

	return ntb_group;
}

/*==== Virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/

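/*
 * Minimal type 0 configuration space header for the single device on the
 * virtual bus. The Device ID/Vendor ID word is patched with the
 * configfs-supplied vntb_pid/vntb_vid values in epf_ntb_bind() before the
 * virtual bus is scanned.
 */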
static u32 pci_space[] = {
	0xffffffff,	/* Device ID, Vendor ID */
	0,		/* Status, Command */
	0xffffffff,	/* Base Class, Subclass, Prog Intf, Revision ID */
	0x40,		/* BIST, Header Type, Latency Timer, Cache Line Size */
	0,		/* BAR 0 */
	0,		/* BAR 1 */
	0,		/* BAR 2 */
	0,		/* BAR 3 */
	0,		/* BAR 4 */
	0,		/* BAR 5 */
	0,		/* Cardbus CIS Pointer */
	0,		/* Subsystem ID, Subsystem Vendor ID */
	0,		/* ROM Base Address */
	0,		/* Reserved, Capabilities Pointer */
	0,		/* Reserved */
	0,		/* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
	if (devfn == 0) {
		memcpy(val, ((u8 *)pci_space) + where, size);
		return PCIBIOS_SUCCESSFUL;
	}
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
	return 0;
}

static struct pci_ops vpci_ops = {
	.read	= pci_read,
	.write	= pci_write,
};

static int vpci_scan_bus(void *sysdata)
{
	struct pci_bus *vpci_bus;
	struct epf_ntb *ndev = sysdata;

	vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
	if (!vpci_bus) {
		pr_err("create pci bus failed\n");
		return -EINVAL;
	}

	pci_bus_add_devices(vpci_bus);

	return 0;
}

/*==================== Virtual PCIe NTB driver ==========================*/

static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct epf_ntb *ndev = ntb_ndev(ntb);

	return ndev->num_mws;
}

static int vntb_epf_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->num_mws;
}

static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
	return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
}

static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}

static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
				 dma_addr_t addr, resource_size_t size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	int ret;
	struct device *dev;

	dev = &ntb->ntb.dev;
	barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
	epf_bar = &ntb->epf->bar[barno];
	epf_bar->phys_addr = addr;
	epf_bar->barno = barno;
	epf_bar->size = size;

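	/*
	 * Re-program the memory window BAR so that it translates to the
	 * buffer just provided by the VHOST NTB client; HOST writes to this
	 * BAR then land directly in VHOST memory.
	 */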
	ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
	if (ret) {
		dev_err(dev, "Failed to set MW translation\n");
		return ret;
	}
	return 0;
}

static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
	return 0;
}

static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
				     phys_addr_t *base, resource_size_t *size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (base)
		*base = ntb->vpci_mw_phy[idx];

	if (size)
		*size = ntb->mws_size[idx];

	return 0;
}

static int vntb_epf_link_enable(struct ntb_dev *ntb,
				enum ntb_speed max_speed,
				enum ntb_width max_width)
{
	return 0;
}

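/*
 * Scratchpad accessors: as shown in the layout diagram above, the VHOST's
 * own scratchpads start at spad_offset + spad_count * 4 within the control
 * BAR, while the peer (HOST) scratchpads start at spad_offset.
 */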
static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
	u32 val;
	void __iomem *base = (void __iomem *)ntb->reg;

	val = readl(base + off + ct + idx * sizeof(u32));
	return val;
}

static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + ct + idx * sizeof(u32));
	return 0;
}

static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;
	u32 val;

	val = readl(base + off + idx * sizeof(u32));
	return val;
}

static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + idx * sizeof(u32));
	return 0;
}

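/*
 * Ringing a peer doorbell from the VHOST side means interrupting the HOST:
 * translate the doorbell bit into an MSI vector and raise it through the
 * endpoint controller.
 */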
static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
{
	u32 interrupt_num = ffs(db_bits) + 1;
	struct epf_ntb *ntb = ntb_ndev(ndev);
	u8 func_no, vfunc_no;
	int ret;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
				PCI_IRQ_MSI, interrupt_num + 1);
	if (ret)
		dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");

	return ret;
}

static u64 vntb_epf_db_read(struct ntb_dev *ndev)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->db;
}

static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
				 resource_size_t *addr_align,
				 resource_size_t *size_align,
				 resource_size_t *size_max)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = ntb->mws_size[idx];

	return 0;
}

static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
			       enum ntb_speed *speed,
			       enum ntb_width *width)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->reg->link_status;
}

static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
{
	return 0;
}

static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	ntb->db &= ~db_bits;
	return 0;
}

static int vntb_epf_link_disable(struct ntb_dev *ntb)
{
	return 0;
}

static const struct ntb_dev_ops vntb_epf_ops = {
	.mw_count		= vntb_epf_mw_count,
	.spad_count		= vntb_epf_spad_count,
	.peer_mw_count		= vntb_epf_peer_mw_count,
	.db_valid_mask		= vntb_epf_db_valid_mask,
	.db_set_mask		= vntb_epf_db_set_mask,
	.mw_set_trans		= vntb_epf_mw_set_trans,
	.mw_clear_trans		= vntb_epf_mw_clear_trans,
	.peer_mw_get_addr	= vntb_epf_peer_mw_get_addr,
	.link_enable		= vntb_epf_link_enable,
	.spad_read		= vntb_epf_spad_read,
	.spad_write		= vntb_epf_spad_write,
	.peer_spad_read		= vntb_epf_peer_spad_read,
	.peer_spad_write	= vntb_epf_peer_spad_write,
	.peer_db_set		= vntb_epf_peer_db_set,
	.db_read		= vntb_epf_db_read,
	.mw_get_align		= vntb_epf_mw_get_align,
	.link_is_up		= vntb_epf_link_is_up,
	.db_clear_mask		= vntb_epf_db_clear_mask,
	.db_clear		= vntb_epf_db_clear,
	.link_disable		= vntb_epf_link_disable,
};

static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
	struct device *dev = &pdev->dev;

	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &vntb_epf_ops;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Cannot set DMA mask\n");
		return ret;
	}

	ret = ntb_register_device(&ndev->ntb);
	if (ret) {
		dev_err(dev, "Failed to register NTB device\n");
		return ret;
	}

	dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
	return 0;
}

static struct pci_device_id pci_vntb_table[] = {
	{
		PCI_DEVICE(0xffff, 0xffff),
	},
	{},
};

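/*
 * The 0xffff placeholder IDs in pci_vntb_table are overwritten in
 * epf_ntb_bind() with the configfs-supplied vntb_vid/vntb_pid, so the
 * driver below matches exactly the one device described by pci_space[].
 */
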
static struct pci_driver vntb_pci_driver = {
	.name		= "pci-vntb",
	.id_table	= pci_vntb_table,
	.probe		= pci_vntb_probe,
};

/* ============ PCIe EPF Driver Bind ==================== */

/**
 * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
 * @epf: NTB endpoint function device
 *
 * Initialize the endpoint controller associated with the NTB function device
 * and bring up the virtual PCI bus. Invoked when the endpoint function is
 * bound to an EPC device.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_bind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	int ret;

	if (!epf->epc) {
		dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
		return 0;
	}

	ret = epf_ntb_init_epc_bar(ntb);
	if (ret) {
		dev_err(dev, "Failed to create NTB EPC\n");
		goto err_bar_init;
	}

	ret = epf_ntb_config_spad_bar_alloc(ntb);
	if (ret) {
		dev_err(dev, "Failed to allocate BAR memory\n");
		goto err_bar_alloc;
	}

	ret = epf_ntb_epc_init(ntb);
	if (ret) {
		dev_err(dev, "Failed to initialize EPC\n");
		goto err_bar_alloc;
	}

	epf_set_drvdata(epf, ntb);

	pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
	pci_vntb_table[0].vendor = ntb->vntb_vid;
	pci_vntb_table[0].device = ntb->vntb_pid;

	ret = pci_register_driver(&vntb_pci_driver);
	if (ret) {
		dev_err(dev, "Failed to register vntb pci driver\n");
		goto err_epc_cleanup;
	}

	ret = vpci_scan_bus(ntb);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	pci_unregister_driver(&vntb_pci_driver);
err_epc_cleanup:
	epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
	epf_ntb_config_spad_bar_free(ntb);

err_bar_init:
	epf_ntb_epc_destroy(ntb);

	return ret;
}

/**
 * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
 * @epf: NTB endpoint function device
 *
 * Cleanup the initialization from epf_ntb_bind()
 */
static void epf_ntb_unbind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	epf_ntb_epc_cleanup(ntb);
	epf_ntb_config_spad_bar_free(ntb);
	epf_ntb_epc_destroy(ntb);

	pci_unregister_driver(&vntb_pci_driver);
}

static const struct pci_epf_ops epf_ntb_ops = {
	.bind	= epf_ntb_bind,
	.unbind	= epf_ntb_unbind,
	.add_cfs = epf_ntb_add_cfs,
};

/**
 * epf_ntb_probe() - Probe NTB function driver
 * @epf: NTB endpoint function device
 * @id: NTB endpoint function device ID
 *
 * Probe NTB function driver when endpoint function bus detects a NTB
 * endpoint function.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_probe(struct pci_epf *epf,
			 const struct pci_epf_device_id *id)
{
	struct epf_ntb *ntb;
	struct device *dev;
	int i;

	dev = &epf->dev;

	ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
	if (!ntb)
		return -ENOMEM;

	epf->header = &epf_ntb_header;
	ntb->epf = epf;
	ntb->vbus_number = 0xff;

	/* Initially, no BAR is assigned */
	for (i = 0; i < VNTB_BAR_NUM; i++)
		ntb->epf_ntb_bar[i] = NO_BAR;

	epf_set_drvdata(epf, ntb);

	dev_info(dev, "pci-ep epf driver loaded\n");
	return 0;
}

static const struct pci_epf_device_id epf_ntb_ids[] = {
	{
		.name = "pci_epf_vntb",
	},
	{},
};

static struct pci_epf_driver epf_ntb_driver = {
	.driver.name	= "pci_epf_vntb",
	.probe		= epf_ntb_probe,
	.id_table	= epf_ntb_ids,
	.ops		= &epf_ntb_ops,
	.owner		= THIS_MODULE,
};

static int __init epf_ntb_init(void)
{
	int ret;

	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
					    WQ_HIGHPRI, 0);
	ret = pci_epf_register_driver(&epf_ntb_driver);
	if (ret) {
		destroy_workqueue(kpcintb_workqueue);
		pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(epf_ntb_init);

static void __exit epf_ntb_exit(void)
{
	pci_epf_unregister_driver(&epf_ntb_driver);
	destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);

MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
MODULE_LICENSE("GPL v2");