1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Endpoint Function Driver to implement Non-Transparent Bridge functionality
4 * Between PCI RC and EP
5 *
6 * Copyright (C) 2020 Texas Instruments
7 * Copyright (C) 2022 NXP
8 *
9 * Based on pci-epf-ntb.c
10 * Author: Frank Li <Frank.Li@nxp.com>
11 * Author: Kishon Vijay Abraham I <kishon@ti.com>
12 */
13
14 /*
15 * +------------+ +---------------------------------------+
16 * | | | |
17 * +------------+ | +--------------+
18 * | NTB | | | NTB |
19 * | NetDev | | | NetDev |
20 * +------------+ | +--------------+
21 * | NTB | | | NTB |
22 * | Transfer | | | Transfer |
23 * +------------+ | +--------------+
24 * | | | | |
25 * | PCI NTB | | | |
26 * | EPF | | | |
27 * | Driver | | | PCI Virtual |
28 * | | +---------------+ | NTB Driver |
29 * | | | PCI EP NTB |<------>| |
30 * | | | FN Driver | | |
31 * +------------+ +---------------+ +--------------+
32 * | | | | | |
33 * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
34 * | | PCI | | | Bus |
35 * +------------+ +---------------+--------+--------------+
36 * PCIe Root Port PCI EP
37 */
38
39 #include <linux/atomic.h>
40 #include <linux/delay.h>
41 #include <linux/io.h>
42 #include <linux/module.h>
43 #include <linux/slab.h>
44
45 #include <linux/pci-ep-msi.h>
46 #include <linux/pci-epc.h>
47 #include <linux/pci-epf.h>
48 #include <linux/ntb.h>
49
50 static struct workqueue_struct *kpcintb_workqueue;
51
52 #define COMMAND_CONFIGURE_DOORBELL 1
53 #define COMMAND_TEARDOWN_DOORBELL 2
54 #define COMMAND_CONFIGURE_MW 3
55 #define COMMAND_TEARDOWN_MW 4
56 #define COMMAND_LINK_UP 5
57 #define COMMAND_LINK_DOWN 6
58
59 #define COMMAND_STATUS_OK 1
60 #define COMMAND_STATUS_ERROR 2
61
62 #define LINK_STATUS_UP BIT(0)
63
64 #define SPAD_COUNT 64
65 #define DB_COUNT 4
66 #define NTB_MW_OFFSET 2
67 #define DB_COUNT_MASK GENMASK(15, 0)
68 #define MSIX_ENABLE BIT(16)
69 #define MAX_DB_COUNT 32
70 #define MAX_MW 4
71
/* Logical roles of the BARs used by this NTB endpoint function. */
enum epf_ntb_bar {
	BAR_CONFIG,	/* control/status registers + scratchpad space */
	BAR_DB,		/* doorbell region */
	BAR_MW1,	/* memory windows 1..4 */
	BAR_MW2,
	BAR_MW3,
	BAR_MW4,
	VNTB_BAR_NUM,	/* number of logical BAR roles */
};
81
82 /*
83 * +--------------------------------------------------+ Base
84 * | |
85 * | |
86 * | |
87 * | Common Control Register |
88 * | |
89 * | |
90 * | |
91 * +-----------------------+--------------------------+ Base+spad_offset
92 * | | |
93 * | Peer Spad Space | Spad Space |
94 * | | |
95 * | | |
96 * +-----------------------+--------------------------+ Base+spad_offset
97 * | | | +spad_count * 4
98 * | | |
99 * | Spad Space | Peer Spad Space |
100 * | | |
101 * +-----------------------+--------------------------+
102 * Virtual PCI PCIe Endpoint
103 * NTB Driver NTB Driver
104 */
/*
 * Control/status region shared with the virtual NTB driver (layout shown in
 * the diagram above). It sits at the start of the BAR_CONFIG allocation;
 * the scratchpad area follows at @spad_offset.
 */
struct epf_ntb_ctrl {
	u32 command;		/* COMMAND_* opcode written by the host */
	u32 argument;		/* command operand (e.g. memory-window index) */
	u16 command_status;	/* COMMAND_STATUS_OK / COMMAND_STATUS_ERROR */
	u16 link_status;	/* LINK_STATUS_UP bit when link is raised */
	u32 topology;
	u64 addr;		/* target address for the next MW mapping */
	u64 size;		/* size of the next MW mapping */
	u32 num_mws;		/* number of memory windows exposed */
	u32 reserved;
	u32 spad_offset;	/* offset of scratchpad area from region base */
	u32 spad_count;		/* number of scratchpad registers */
	u32 db_entry_size;	/* doorbell entry stride (0 in MSI mode) */
	u32 db_data[MAX_DB_COUNT];	/* value to write to ring doorbell i */
	u32 db_offset[MAX_DB_COUNT];	/* BAR offset to write db_data[i] to */
} __packed;
121
/* Per-instance state of one virtual-NTB endpoint function. */
struct epf_ntb {
	struct ntb_dev ntb;		/* device registered with the NTB core */
	struct pci_epf *epf;		/* backing PCI endpoint function */
	struct config_group group;	/* configfs group (see epf_ntb_attrs) */

	u32 num_mws;			/* memory windows in use (<= MAX_MW) */
	u32 db_count;			/* doorbells in use (<= MAX_DB_COUNT) */
	u32 spad_count;			/* scratchpad register count (configfs) */
	u64 mws_size[MAX_MW];		/* size of each memory window (configfs) */
	atomic64_t db;			/* pending doorbell bitmap */
	u32 vbus_number;		/* configfs-set virtual bus number */
	u16 vntb_pid;			/* configfs-set virtual NTB product ID */
	u16 vntb_vid;			/* configfs-set virtual NTB vendor ID */

	bool linkup;			/* last link command received from host */
	bool msi_doorbell;		/* true when MSI doorbells are active */
	u32 spad_size;			/* bytes allocated for scratchpads */

	/* BAR number assigned to each logical role, NO_BAR/-1 if unset */
	enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];

	struct epf_ntb_ctrl *reg;	/* shared control/status region */

	u32 *epf_db;			/* polled doorbell array (polling mode) */

	phys_addr_t vpci_mw_phy[MAX_MW];	/* EPC outbound window phys addrs */
	void __iomem *vpci_mw_addr[MAX_MW];	/* EPC outbound window mappings */

	struct delayed_work cmd_handler;	/* periodic command/doorbell poll */
};
151
152 #define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
153 #define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
154
/*
 * Template PCI configuration-space header for the endpoint function;
 * written to the controller in epf_ntb_epc_init().
 */
static struct pci_epf_header epf_ntb_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_BASE_CLASS_MEMORY,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
161
162 /**
163 * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
164 * @ntb: NTB device that facilitates communication between HOST and VHOST
165 * @link_up: true or false indicating Link is UP or Down
166 *
167 * Once NTB function in HOST invoke ntb_link_enable(),
168 * this NTB function driver will trigger a link event to VHOST.
169 *
170 * Returns: Zero for success, or an error code in case of failure
171 */
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
	struct epf_ntb_ctrl *ctrl = ntb->reg;

	/* Reflect the requested state in the shared control region ... */
	if (link_up)
		ctrl->link_status |= LINK_STATUS_UP;
	else
		ctrl->link_status &= ~LINK_STATUS_UP;

	/* ... and notify the virtual NTB driver of the change */
	ntb_link_event(&ntb->ntb);

	return 0;
}
182
183 /**
184 * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
185 * to access the memory window of HOST
186 * @ntb: NTB device that facilitates communication between HOST and VHOST
187 * @mw: Index of the memory window (either 0, 1, 2 or 3)
188 *
189 * EP Outbound Window
190 * +--------+ +-----------+
191 * | | | |
192 * | | | |
193 * | | | |
194 * | | | |
195 * | | +-----------+
196 * | Virtual| | Memory Win|
197 * | NTB | -----------> | |
198 * | Driver | | |
199 * | | +-----------+
200 * | | | |
201 * | | | |
202 * +--------+ +-----------+
203 * VHOST PCI EP
204 *
205 * Returns: Zero for success, or an error code in case of failure
206 */
static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
{
	struct pci_epf *epf = ntb->epf;
	int ret;

	/*
	 * Map the EPC outbound region backing this memory window to the
	 * address/size the host programmed into the control region.
	 */
	ret = pci_epc_map_addr(epf->epc, epf->func_no, epf->vfunc_no,
			       ntb->vpci_mw_phy[mw], ntb->reg->addr,
			       ntb->reg->size);
	if (ret)
		dev_err(&epf->epc->dev,
			"Failed to map memory window %d address\n", mw);

	return ret;
}
227
228 /**
229 * epf_ntb_teardown_mw() - Teardown the configured OB ATU
230 * @ntb: NTB device that facilitates communication between HOST and VHOST
231 * @mw: Index of the memory window (either 0, 1, 2 or 3)
232 *
233 * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
234 * pci_epc_unmap_addr()
235 */
static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
{
	struct pci_epf *epf = ntb->epf;

	/* Undo the outbound mapping installed by epf_ntb_configure_mw() */
	pci_epc_unmap_addr(epf->epc, epf->func_no, epf->vfunc_no,
			   ntb->vpci_mw_phy[mw]);
}
243
244 /**
245 * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
246 * @work: work_struct for the epf_ntb_epc
247 *
248 * Workqueue function that gets invoked for the two epf_ntb_epc
249 * periodically (once every 5ms) to see if it has received any commands
250 * from NTB HOST. The HOST can send commands to configure doorbell or
251 * configure memory window or to update link status.
252 */
epf_ntb_cmd_handler(struct work_struct * work)253 static void epf_ntb_cmd_handler(struct work_struct *work)
254 {
255 struct epf_ntb_ctrl *ctrl;
256 u32 command, argument;
257 struct epf_ntb *ntb;
258 struct device *dev;
259 int ret;
260 int i;
261
262 ntb = container_of(work, struct epf_ntb, cmd_handler.work);
263
264 for (i = 1; i < ntb->db_count && !ntb->msi_doorbell; i++) {
265 if (ntb->epf_db[i]) {
266 atomic64_or(1 << (i - 1), &ntb->db);
267 ntb_db_event(&ntb->ntb, i);
268 ntb->epf_db[i] = 0;
269 }
270 }
271
272 ctrl = ntb->reg;
273 command = ctrl->command;
274 if (!command)
275 goto reset_handler;
276 argument = ctrl->argument;
277
278 ctrl->command = 0;
279 ctrl->argument = 0;
280
281 ctrl = ntb->reg;
282 dev = &ntb->epf->dev;
283
284 switch (command) {
285 case COMMAND_CONFIGURE_DOORBELL:
286 ctrl->command_status = COMMAND_STATUS_OK;
287 break;
288 case COMMAND_TEARDOWN_DOORBELL:
289 ctrl->command_status = COMMAND_STATUS_OK;
290 break;
291 case COMMAND_CONFIGURE_MW:
292 ret = epf_ntb_configure_mw(ntb, argument);
293 if (ret < 0)
294 ctrl->command_status = COMMAND_STATUS_ERROR;
295 else
296 ctrl->command_status = COMMAND_STATUS_OK;
297 break;
298 case COMMAND_TEARDOWN_MW:
299 epf_ntb_teardown_mw(ntb, argument);
300 ctrl->command_status = COMMAND_STATUS_OK;
301 break;
302 case COMMAND_LINK_UP:
303 ntb->linkup = true;
304 ret = epf_ntb_link_up(ntb, true);
305 if (ret < 0)
306 ctrl->command_status = COMMAND_STATUS_ERROR;
307 else
308 ctrl->command_status = COMMAND_STATUS_OK;
309 goto reset_handler;
310 case COMMAND_LINK_DOWN:
311 ntb->linkup = false;
312 ret = epf_ntb_link_up(ntb, false);
313 if (ret < 0)
314 ctrl->command_status = COMMAND_STATUS_ERROR;
315 else
316 ctrl->command_status = COMMAND_STATUS_OK;
317 break;
318 default:
319 dev_err(dev, "UNKNOWN command: %d\n", command);
320 break;
321 }
322
323 reset_handler:
324 queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
325 ntb->msi_doorbell ? msecs_to_jiffies(500) : msecs_to_jiffies(5));
326 }
327
epf_ntb_doorbell_handler(int irq,void * data)328 static irqreturn_t epf_ntb_doorbell_handler(int irq, void *data)
329 {
330 struct epf_ntb *ntb = data;
331 int i;
332
333 for (i = 1; i < ntb->db_count; i++)
334 if (irq == ntb->epf->db_msg[i].virq) {
335 atomic64_or(1 << (i - 1), &ntb->db);
336 ntb_db_event(&ntb->ntb, i);
337 }
338
339 return IRQ_HANDLED;
340 }
341
342 /**
343 * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
344 * @ntb: EPC associated with one of the HOST which holds peer's outbound
345 * address.
346 *
347 * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
348 * self scratchpad region (removes inbound ATU configuration). While BAR0 is
349 * the default self scratchpad BAR, an NTB could have other BARs for self
350 * scratchpad (because of reserved BARs). This function can get the exact BAR
351 * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
352 *
353 * Please note the self scratchpad region and config region is combined to
354 * a single region and mapped using the same BAR. Also note VHOST's peer
355 * scratchpad is HOST's self scratchpad.
356 *
357 * Returns: void
358 */
epf_ntb_config_sspad_bar_clear(struct epf_ntb * ntb)359 static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
360 {
361 struct pci_epf_bar *epf_bar;
362 enum pci_barno barno;
363
364 barno = ntb->epf_ntb_bar[BAR_CONFIG];
365 epf_bar = &ntb->epf->bar[barno];
366
367 pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
368 }
369
370 /**
371 * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
372 * @ntb: NTB device that facilitates communication between HOST and VHOST
373 *
374 * Map BAR0 of EP CONTROLLER which contains the VHOST's config and
375 * self scratchpad region.
376 *
377 * Please note the self scratchpad region and config region is combined to
378 * a single region and mapped using the same BAR.
379 *
380 * Returns: Zero for success, or an error code in case of failure
381 */
epf_ntb_config_sspad_bar_set(struct epf_ntb * ntb)382 static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
383 {
384 struct pci_epf_bar *epf_bar;
385 enum pci_barno barno;
386 u8 func_no, vfunc_no;
387 struct device *dev;
388 int ret;
389
390 dev = &ntb->epf->dev;
391 func_no = ntb->epf->func_no;
392 vfunc_no = ntb->epf->vfunc_no;
393 barno = ntb->epf_ntb_bar[BAR_CONFIG];
394 epf_bar = &ntb->epf->bar[barno];
395
396 ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
397 if (ret) {
398 dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n");
399 return ret;
400 }
401 return 0;
402 }
403
404 /**
405 * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
406 * config + scratchpad region
407 * @ntb: NTB device that facilitates communication between HOST and VHOST
408 */
epf_ntb_config_spad_bar_free(struct epf_ntb * ntb)409 static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
410 {
411 enum pci_barno barno;
412
413 barno = ntb->epf_ntb_bar[BAR_CONFIG];
414 pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
415 }
416
417 /**
418 * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
419 * region
420 * @ntb: NTB device that facilitates communication between HOST and VHOST
421 *
422 * Allocate the Local Memory mentioned in the above diagram. The size of
423 * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
424 * is obtained from "spad-count" configfs entry.
425 *
426 * Returns: Zero for success, or an error code in case of failure
427 */
epf_ntb_config_spad_bar_alloc(struct epf_ntb * ntb)428 static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
429 {
430 enum pci_barno barno;
431 struct epf_ntb_ctrl *ctrl;
432 u32 spad_size, ctrl_size;
433 struct pci_epf *epf = ntb->epf;
434 struct device *dev = &epf->dev;
435 u32 spad_count;
436 void *base;
437 int i;
438 const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
439 epf->func_no,
440 epf->vfunc_no);
441 barno = ntb->epf_ntb_bar[BAR_CONFIG];
442 spad_count = ntb->spad_count;
443
444 ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
445 spad_size = 2 * spad_count * sizeof(u32);
446
447 base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
448 barno, epc_features, 0);
449 if (!base) {
450 dev_err(dev, "Config/Status/SPAD alloc region fail\n");
451 return -ENOMEM;
452 }
453
454 ntb->reg = base;
455
456 ctrl = ntb->reg;
457 ctrl->spad_offset = ctrl_size;
458
459 ctrl->spad_count = spad_count;
460 ctrl->num_mws = ntb->num_mws;
461 ntb->spad_size = spad_size;
462
463 ctrl->db_entry_size = sizeof(u32);
464
465 for (i = 0; i < ntb->db_count; i++) {
466 ntb->reg->db_data[i] = 1 + i;
467 ntb->reg->db_offset[i] = 0;
468 }
469
470 return 0;
471 }
472
473 /**
474 * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
475 * @ntb: NTB device that facilitates communication between HOST and VHOST
476 *
477 * Configure MSI/MSI-X capability for each interface with number of
478 * interrupts equal to "db_count" configfs entry.
479 *
480 * Returns: Zero for success, or an error code in case of failure
481 */
epf_ntb_configure_interrupt(struct epf_ntb * ntb)482 static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
483 {
484 const struct pci_epc_features *epc_features;
485 struct device *dev;
486 u32 db_count;
487 int ret;
488
489 dev = &ntb->epf->dev;
490
491 epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
492
493 if (!(epc_features->msix_capable || epc_features->msi_capable)) {
494 dev_err(dev, "MSI or MSI-X is required for doorbell\n");
495 return -EINVAL;
496 }
497
498 db_count = ntb->db_count;
499 if (db_count > MAX_DB_COUNT) {
500 dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
501 return -EINVAL;
502 }
503
504 ntb->db_count = db_count;
505
506 if (epc_features->msi_capable) {
507 ret = pci_epc_set_msi(ntb->epf->epc,
508 ntb->epf->func_no,
509 ntb->epf->vfunc_no,
510 16);
511 if (ret) {
512 dev_err(dev, "MSI configuration failed\n");
513 return ret;
514 }
515 }
516
517 return 0;
518 }
519
epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb * ntb,struct pci_epf_bar * db_bar,const struct pci_epc_features * epc_features,enum pci_barno barno)520 static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
521 struct pci_epf_bar *db_bar,
522 const struct pci_epc_features *epc_features,
523 enum pci_barno barno)
524 {
525 struct pci_epf *epf = ntb->epf;
526 dma_addr_t low, high;
527 struct msi_msg *msg;
528 size_t sz;
529 int ret;
530 int i;
531
532 ret = pci_epf_alloc_doorbell(epf, ntb->db_count);
533 if (ret)
534 return ret;
535
536 for (i = 0; i < ntb->db_count; i++) {
537 ret = request_irq(epf->db_msg[i].virq, epf_ntb_doorbell_handler,
538 0, "pci_epf_vntb_db", ntb);
539
540 if (ret) {
541 dev_err(&epf->dev,
542 "Failed to request doorbell IRQ: %d\n",
543 epf->db_msg[i].virq);
544 goto err_free_irq;
545 }
546 }
547
548 msg = &epf->db_msg[0].msg;
549
550 high = 0;
551 low = (u64)msg->address_hi << 32 | msg->address_lo;
552
553 for (i = 0; i < ntb->db_count; i++) {
554 struct msi_msg *msg = &epf->db_msg[i].msg;
555 dma_addr_t addr = (u64)msg->address_hi << 32 | msg->address_lo;
556
557 low = min(low, addr);
558 high = max(high, addr);
559 }
560
561 sz = high - low + sizeof(u32);
562
563 ret = pci_epf_assign_bar_space(epf, sz, barno, epc_features, 0, low);
564 if (ret) {
565 dev_err(&epf->dev, "Failed to assign Doorbell BAR space\n");
566 goto err_free_irq;
567 }
568
569 ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
570 ntb->epf->vfunc_no, db_bar);
571 if (ret) {
572 dev_err(&epf->dev, "Failed to set Doorbell BAR\n");
573 goto err_free_irq;
574 }
575
576 for (i = 0; i < ntb->db_count; i++) {
577 struct msi_msg *msg = &epf->db_msg[i].msg;
578 dma_addr_t addr;
579 size_t offset;
580
581 ret = pci_epf_align_inbound_addr(epf, db_bar->barno,
582 ((u64)msg->address_hi << 32) | msg->address_lo,
583 &addr, &offset);
584
585 if (ret) {
586 ntb->msi_doorbell = false;
587 goto err_free_irq;
588 }
589
590 ntb->reg->db_data[i] = msg->data;
591 ntb->reg->db_offset[i] = offset;
592 }
593
594 ntb->reg->db_entry_size = 0;
595
596 ntb->msi_doorbell = true;
597
598 return 0;
599
600 err_free_irq:
601 for (i--; i >= 0; i--)
602 free_irq(epf->db_msg[i].virq, ntb);
603
604 pci_epf_free_doorbell(ntb->epf);
605 return ret;
606 }
607
608 /**
609 * epf_ntb_db_bar_init() - Configure Doorbell window BARs
610 * @ntb: NTB device that facilitates communication between HOST and VHOST
611 *
612 * Returns: Zero for success, or an error code in case of failure
613 */
epf_ntb_db_bar_init(struct epf_ntb * ntb)614 static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
615 {
616 const struct pci_epc_features *epc_features;
617 struct device *dev = &ntb->epf->dev;
618 int ret;
619 struct pci_epf_bar *epf_bar;
620 void *mw_addr;
621 enum pci_barno barno;
622 size_t size = sizeof(u32) * ntb->db_count;
623
624 epc_features = pci_epc_get_features(ntb->epf->epc,
625 ntb->epf->func_no,
626 ntb->epf->vfunc_no);
627 barno = ntb->epf_ntb_bar[BAR_DB];
628 epf_bar = &ntb->epf->bar[barno];
629
630 ret = epf_ntb_db_bar_init_msi_doorbell(ntb, epf_bar, epc_features, barno);
631 if (ret) {
632 /* fall back to polling mode */
633 mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
634 if (!mw_addr) {
635 dev_err(dev, "Failed to allocate OB address\n");
636 return -ENOMEM;
637 }
638
639 ntb->epf_db = mw_addr;
640
641 ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
642 ntb->epf->vfunc_no, epf_bar);
643 if (ret) {
644 dev_err(dev, "Doorbell BAR set failed\n");
645 goto err_alloc_peer_mem;
646 }
647 }
648 return ret;
649
650 err_alloc_peer_mem:
651 pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
652 return -1;
653 }
654
655 static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
656
657 /**
658 * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
659 * allocated in peer's outbound address space
660 * @ntb: NTB device that facilitates communication between HOST and VHOST
661 */
static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	/* In MSI doorbell mode, release the per-doorbell IRQ handlers first */
	if (ntb->msi_doorbell) {
		int i;

		for (i = 0; i < ntb->db_count; i++)
			free_irq(ntb->epf->db_msg[i].virq, ntb);
	}

	/* db_msg is only populated when doorbells were allocated */
	if (ntb->epf->db_msg)
		pci_epf_free_doorbell(ntb->epf);

	/*
	 * Release the polled doorbell array (epf_db is NULL when MSI mode
	 * was used - presumably pci_epf_free_space tolerates that; TODO
	 * confirm) and tear down the doorbell BAR.
	 */
	barno = ntb->epf_ntb_bar[BAR_DB];
	pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
}
683
684 /**
685 * epf_ntb_mw_bar_init() - Configure Memory window BARs
686 * @ntb: NTB device that facilitates communication between HOST and VHOST
687 *
688 * Returns: Zero for success, or an error code in case of failure
689 */
static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
{
	int ret = 0;
	int i;
	u64 size;
	enum pci_barno barno;
	struct device *dev = &ntb->epf->dev;

	for (i = 0; i < ntb->num_mws; i++) {
		size = ntb->mws_size[i];
		barno = ntb->epf_ntb_bar[BAR_MW1 + i];

		/*
		 * Describe the MW BAR by hand: no local backing memory is
		 * attached (addr/phys_addr stay 0); only the BAR itself is
		 * programmed into the controller.
		 */
		ntb->epf->bar[barno].barno = barno;
		ntb->epf->bar[barno].size = size;
		ntb->epf->bar[barno].addr = NULL;
		ntb->epf->bar[barno].phys_addr = 0;
		/* 64-bit BAR type only when the size exceeds 32 bits */
		ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
			PCI_BASE_ADDRESS_MEM_TYPE_64 :
			PCI_BASE_ADDRESS_MEM_TYPE_32;

		ret = pci_epc_set_bar(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      &ntb->epf->bar[barno]);
		if (ret) {
			dev_err(dev, "MW set failed\n");
			/* windows 0..i-1 are fully set up; clear them all */
			goto err_alloc_mem;
		}

		/* Allocate EPC outbound memory windows to vpci vntb device */
		ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
							      &ntb->vpci_mw_phy[i],
							      size);
		if (!ntb->vpci_mw_addr[i]) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to allocate source address\n");
			goto err_set_bar;
		}
	}

	return ret;

err_set_bar:
	/* BAR i was set but has no outbound memory; clear it separately */
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
err_alloc_mem:
	epf_ntb_mw_bar_clear(ntb, i);
	return ret;
}
741
742 /**
743 * epf_ntb_mw_bar_clear() - Clear Memory window BARs
744 * @ntb: NTB device that facilitates communication between HOST and VHOST
745 * @num_mws: the number of Memory window BARs that to be cleared
746 */
epf_ntb_mw_bar_clear(struct epf_ntb * ntb,int num_mws)747 static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
748 {
749 enum pci_barno barno;
750 int i;
751
752 for (i = 0; i < num_mws; i++) {
753 barno = ntb->epf_ntb_bar[BAR_MW1 + i];
754 pci_epc_clear_bar(ntb->epf->epc,
755 ntb->epf->func_no,
756 ntb->epf->vfunc_no,
757 &ntb->epf->bar[barno]);
758
759 pci_epc_mem_free_addr(ntb->epf->epc,
760 ntb->vpci_mw_phy[i],
761 ntb->vpci_mw_addr[i],
762 ntb->mws_size[i]);
763 }
764 }
765
766 /**
767 * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
768 * @ntb: NTB device that facilitates communication between HOST and VHOST
769 *
770 * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
771 */
epf_ntb_epc_destroy(struct epf_ntb * ntb)772 static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
773 {
774 pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
775 pci_epc_put(ntb->epf->epc);
776 }
777
778
779 /**
780 * epf_ntb_is_bar_used() - Check if a bar is used in the ntb configuration
781 * @ntb: NTB device that facilitates communication between HOST and VHOST
782 * @barno: Checked bar number
783 *
784 * Returns: true if used, false if free.
785 */
epf_ntb_is_bar_used(struct epf_ntb * ntb,enum pci_barno barno)786 static bool epf_ntb_is_bar_used(struct epf_ntb *ntb,
787 enum pci_barno barno)
788 {
789 int i;
790
791 for (i = 0; i < VNTB_BAR_NUM; i++) {
792 if (ntb->epf_ntb_bar[i] == barno)
793 return true;
794 }
795
796 return false;
797 }
798
799 /**
800 * epf_ntb_find_bar() - Assign BAR number when no configuration is provided
801 * @ntb: NTB device that facilitates communication between HOST and VHOST
802 * @epc_features: The features provided by the EPC specific to this EPF
803 * @bar: NTB BAR index
804 * @barno: Bar start index
805 *
806 * When the BAR configuration was not provided through the userspace
807 * configuration, automatically assign BAR as it has been historically
808 * done by this endpoint function.
809 *
810 * Returns: the BAR number found, if any. -1 otherwise
811 */
epf_ntb_find_bar(struct epf_ntb * ntb,const struct pci_epc_features * epc_features,enum epf_ntb_bar bar,enum pci_barno barno)812 static int epf_ntb_find_bar(struct epf_ntb *ntb,
813 const struct pci_epc_features *epc_features,
814 enum epf_ntb_bar bar,
815 enum pci_barno barno)
816 {
817 while (ntb->epf_ntb_bar[bar] < 0) {
818 barno = pci_epc_get_next_free_bar(epc_features, barno);
819 if (barno < 0)
820 break; /* No more BAR available */
821
822 /*
823 * Verify if the BAR found is not already assigned
824 * through the provided configuration
825 */
826 if (!epf_ntb_is_bar_used(ntb, barno))
827 ntb->epf_ntb_bar[bar] = barno;
828
829 barno += 1;
830 }
831
832 return barno;
833 }
834
835 /**
836 * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
837 * constructs (scratchpad region, doorbell, memorywindow)
838 * @ntb: NTB device that facilitates communication between HOST and VHOST
839 *
840 * Returns: Zero for success, or an error code in case of failure
841 */
epf_ntb_init_epc_bar(struct epf_ntb * ntb)842 static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
843 {
844 const struct pci_epc_features *epc_features;
845 enum pci_barno barno;
846 enum epf_ntb_bar bar;
847 struct device *dev;
848 u32 num_mws;
849 int i;
850
851 barno = BAR_0;
852 num_mws = ntb->num_mws;
853 dev = &ntb->epf->dev;
854 epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
855
856 /* These are required BARs which are mandatory for NTB functionality */
857 for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) {
858 barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
859 if (barno < 0) {
860 dev_err(dev, "Fail to get NTB function BAR\n");
861 return -ENOENT;
862 }
863 }
864
865 /* These are optional BARs which don't impact NTB functionality */
866 for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) {
867 barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
868 if (barno < 0) {
869 ntb->num_mws = i;
870 dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
871 }
872 }
873
874 return 0;
875 }
876
877 /**
878 * epf_ntb_epc_init() - Initialize NTB interface
879 * @ntb: NTB device that facilitates communication between HOST and VHOST
880 *
881 * Wrapper to initialize a particular EPC interface and start the workqueue
882 * to check for commands from HOST. This function will write to the
883 * EP controller HW for configuring it.
884 *
885 * Returns: Zero for success, or an error code in case of failure
886 */
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
	u8 func_no, vfunc_no;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct device *dev;
	int ret;

	epf = ntb->epf;
	dev = &epf->dev;
	epc = epf->epc;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	/* BAR_CONFIG: control registers + scratchpad region */
	ret = epf_ntb_config_sspad_bar_set(ntb);
	if (ret) {
		dev_err(dev, "Config/self SPAD BAR init failed");
		return ret;
	}

	/* Validate/configure MSI capability for the doorbells */
	ret = epf_ntb_configure_interrupt(ntb);
	if (ret) {
		dev_err(dev, "Interrupt configuration failed\n");
		goto err_config_interrupt;
	}

	/* BAR_DB: MSI doorbells when possible, polled memory otherwise */
	ret = epf_ntb_db_bar_init(ntb);
	if (ret) {
		dev_err(dev, "DB BAR init failed\n");
		goto err_db_bar_init;
	}

	/* BAR_MW1..: memory windows backed by EPC outbound regions */
	ret = epf_ntb_mw_bar_init(ntb);
	if (ret) {
		dev_err(dev, "MW BAR init failed\n");
		goto err_mw_bar_init;
	}

	/* Only write the config header for vfunc_no <= 1 (NOTE(review):
	 * presumably physical function or first virtual function - confirm
	 * against the EPC core's vfunc numbering convention).
	 */
	if (vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			goto err_write_header;
		}
	}

	/* Start polling for host commands (and doorbells in polling mode) */
	INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
	queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);

	return 0;

err_write_header:
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
err_mw_bar_init:
	epf_ntb_db_bar_clear(ntb);
err_db_bar_init:
err_config_interrupt:
	epf_ntb_config_sspad_bar_clear(ntb);

	return ret;
}
948
949
950 /**
951 * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
952 * @ntb: NTB device that facilitates communication between HOST and VHOST
953 *
954 * Wrapper to cleanup all NTB interfaces.
955 */
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
	/* Tear down in the reverse order of epf_ntb_epc_init() */
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
	epf_ntb_db_bar_clear(ntb);
	epf_ntb_config_sspad_bar_clear(ntb);
}
962
/*
 * EPF_NTB_R() - generate a configfs "show" handler that prints the
 * integer field @_name of struct epf_ntb in decimal.
 */
#define EPF_NTB_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,		\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->_name);			\
}
972
/*
 * EPF_NTB_W() - generate a configfs "store" handler that parses the
 * written string as a u32 and assigns it to field @_name of struct
 * epf_ntb (no range check beyond what kstrtou32 provides).
 */
#define EPF_NTB_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	ntb->_name = val;						\
									\
	return len;							\
}
990
/*
 * EPF_NTB_MW_R() - generate a "show" handler for a memory-window size
 * attribute named "mwN". The window index N is parsed back out of the
 * attribute name and validated against the configured num_mws.
 */
#define EPF_NTB_MW_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,		\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]);	\
}
1010
/*
 * EPF_NTB_MW_W() - generate a "store" handler for a memory-window size
 * attribute named "mwN": parses a u64 size and records it in
 * mws_size[N - 1] after validating N against num_mws.
 */
#define EPF_NTB_MW_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
	u64 val;							\
	int ret;							\
									\
	ret = kstrtou64(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	ntb->mws_size[win_no - 1] = val;				\
									\
	return len;							\
}
1038
/*
 * EPF_NTB_BAR_R() - generate a "show" handler printing the BAR number
 * currently assigned to logical slot @_id (NO_BAR/-1 when unset).
 */
#define EPF_NTB_BAR_R(_name, _id)					\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,		\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]);		\
}
1048
/*
 * EPF_NTB_BAR_W() - generate a "store" handler assigning a BAR number to
 * logical slot @_id; accepts NO_BAR (-1, auto-assign) through BAR_5.
 */
#define EPF_NTB_BAR_W(_name, _id)					\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	int val;							\
	int ret;							\
									\
	ret = kstrtoint(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (val < NO_BAR || val > BAR_5)				\
		return -EINVAL;						\
									\
	ntb->epf_ntb_bar[_id] = val;					\
									\
	return len;							\
}
1069
epf_ntb_num_mws_store(struct config_item * item,const char * page,size_t len)1070 static ssize_t epf_ntb_num_mws_store(struct config_item *item,
1071 const char *page, size_t len)
1072 {
1073 struct config_group *group = to_config_group(item);
1074 struct epf_ntb *ntb = to_epf_ntb(group);
1075 u32 val;
1076 int ret;
1077
1078 ret = kstrtou32(page, 0, &val);
1079 if (ret)
1080 return ret;
1081
1082 if (val > MAX_MW)
1083 return -EINVAL;
1084
1085 ntb->num_mws = val;
1086
1087 return len;
1088 }
1089
/*
 * Instantiate the configfs show/store handlers declared by the
 * EPF_NTB_R()/EPF_NTB_W(), EPF_NTB_MW_R()/EPF_NTB_MW_W() and
 * EPF_NTB_BAR_R()/EPF_NTB_BAR_W() generator macros for every tunable:
 * scratchpad/doorbell counts, memory-window sizes, virtual bus number,
 * virtual NTB vendor/device IDs and per-region BAR assignments.
 * num_mws has a hand-written store handler (range-checked against MAX_MW)
 * and only its show handler is generated here.
 */
EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_R(vbus_number)
EPF_NTB_W(vbus_number)
EPF_NTB_R(vntb_pid)
EPF_NTB_W(vntb_pid)
EPF_NTB_R(vntb_vid)
EPF_NTB_W(vntb_vid)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG)
EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG)
EPF_NTB_BAR_R(db_bar, BAR_DB)
EPF_NTB_BAR_W(db_bar, BAR_DB)
EPF_NTB_BAR_R(mw1_bar, BAR_MW1)
EPF_NTB_BAR_W(mw1_bar, BAR_MW1)
EPF_NTB_BAR_R(mw2_bar, BAR_MW2)
EPF_NTB_BAR_W(mw2_bar, BAR_MW2)
EPF_NTB_BAR_R(mw3_bar, BAR_MW3)
EPF_NTB_BAR_W(mw3_bar, BAR_MW3)
EPF_NTB_BAR_R(mw4_bar, BAR_MW4)
EPF_NTB_BAR_W(mw4_bar, BAR_MW4)

/* Bind each generated show/store pair to a configfs attribute object. */
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
CONFIGFS_ATTR(epf_ntb_, ctrl_bar);
CONFIGFS_ATTR(epf_ntb_, db_bar);
CONFIGFS_ATTR(epf_ntb_, mw1_bar);
CONFIGFS_ATTR(epf_ntb_, mw2_bar);
CONFIGFS_ATTR(epf_ntb_, mw3_bar);
CONFIGFS_ATTR(epf_ntb_, mw4_bar);
1138
/* All NTB attributes shown in the per-function configfs directory. */
static struct configfs_attribute *epf_ntb_attrs[] = {
	&epf_ntb_attr_spad_count,
	&epf_ntb_attr_db_count,
	&epf_ntb_attr_num_mws,
	&epf_ntb_attr_mw1,
	&epf_ntb_attr_mw2,
	&epf_ntb_attr_mw3,
	&epf_ntb_attr_mw4,
	&epf_ntb_attr_vbus_number,
	&epf_ntb_attr_vntb_pid,
	&epf_ntb_attr_vntb_vid,
	&epf_ntb_attr_ctrl_bar,
	&epf_ntb_attr_db_bar,
	&epf_ntb_attr_mw1_bar,
	&epf_ntb_attr_mw2_bar,
	&epf_ntb_attr_mw3_bar,
	&epf_ntb_attr_mw4_bar,
	NULL,
};

/* Item type backing the configfs group created by epf_ntb_add_cfs(). */
static const struct config_item_type ntb_group_type = {
	.ct_attrs	= epf_ntb_attrs,
	.ct_owner	= THIS_MODULE,
};
1163
1164 /**
1165 * epf_ntb_add_cfs() - Add configfs directory specific to NTB
1166 * @epf: NTB endpoint function device
1167 * @group: A pointer to the config_group structure referencing a group of
1168 * config_items of a specific type that belong to a specific sub-system.
1169 *
1170 * Add configfs directory specific to NTB. This directory will hold
1171 * NTB specific properties like db_count, spad_count, num_mws etc.,
1172 *
1173 * Returns: Pointer to config_group
1174 */
epf_ntb_add_cfs(struct pci_epf * epf,struct config_group * group)1175 static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
1176 struct config_group *group)
1177 {
1178 struct epf_ntb *ntb = epf_get_drvdata(epf);
1179 struct config_group *ntb_group = &ntb->group;
1180 struct device *dev = &epf->dev;
1181
1182 config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
1183
1184 return ntb_group;
1185 }
1186
/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/

/*
 * Emulated standard configuration header (64 bytes, 16 dwords) for the
 * single device on the virtual PCI bus.  The Device ID/Vendor ID word at
 * index 0 is patched in epf_ntb_bind() with the configfs-supplied
 * vntb_pid/vntb_vid before the vntb PCI driver is registered.
 */
static u32 pci_space[] = {
	0xffffffff,	/* Device ID, Vendor ID */
	0,		/* Status, Command */
	0xffffffff,	/* Base Class, Subclass, Prog Intf, Revision ID */
	0x40,		/* BIST, Header Type, Latency Timer, Cache Line Size */
	0,		/* BAR 0 */
	0,		/* BAR 1 */
	0,		/* BAR 2 */
	0,		/* BAR 3 */
	0,		/* BAR 4 */
	0,		/* BAR 5 */
	0,		/* Cardbus CIS Pointer */
	0,		/* Subsystem ID, Subsystem Vendor ID */
	0,		/* ROM Base Address */
	0,		/* Reserved, Capabilities Pointer */
	0,		/* Reserved */
	0,		/* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};
1207
/*
 * Config-space read accessor for the virtual bus.  Only devfn 0 exists;
 * it is backed by the 64-byte pci_space[] header above.
 *
 * Bound the access explicitly: config accessors may be invoked with
 * offsets beyond the emulated header (e.g. capability probes up to
 * offset 255), and the original unconditional memcpy() would then read
 * past the end of pci_space[].  Unimplemented registers read as zero.
 */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
	if (devfn != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (where < 0 || size < 0 || size > 4 ||
	    (size_t)where + size > sizeof(pci_space)) {
		*val = 0;
		return PCIBIOS_SUCCESSFUL;
	}

	memcpy(val, ((u8 *)pci_space) + where, size);
	return PCIBIOS_SUCCESSFUL;
}
1216
/* Config-space writes to the emulated device are silently discarded. */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
	return 0;
}

/* Config accessors for the single emulated device on the virtual bus. */
static struct pci_ops vpci_ops = {
	.read = pci_read,
	.write = pci_write,
};
1226
vpci_scan_bus(void * sysdata)1227 static int vpci_scan_bus(void *sysdata)
1228 {
1229 struct pci_bus *vpci_bus;
1230 struct epf_ntb *ndev = sysdata;
1231
1232 vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
1233 if (!vpci_bus) {
1234 pr_err("create pci bus failed\n");
1235 return -EINVAL;
1236 }
1237
1238 pci_bus_add_devices(vpci_bus);
1239
1240 return 0;
1241 }
1242
1243 /*==================== Virtual PCIe NTB driver ==========================*/
1244
vntb_epf_mw_count(struct ntb_dev * ntb,int pidx)1245 static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
1246 {
1247 struct epf_ntb *ndev = ntb_ndev(ntb);
1248
1249 return ndev->num_mws;
1250 }
1251
vntb_epf_spad_count(struct ntb_dev * ntb)1252 static int vntb_epf_spad_count(struct ntb_dev *ntb)
1253 {
1254 return ntb_ndev(ntb)->spad_count;
1255 }
1256
vntb_epf_peer_mw_count(struct ntb_dev * ntb)1257 static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
1258 {
1259 return ntb_ndev(ntb)->num_mws;
1260 }
1261
/*
 * Mask of usable doorbell bits: the low db_count bits.
 * NOTE(review): assumes db_count < 64 (MAX_DB_COUNT is 32), otherwise
 * BIT_ULL(db_count) would shift past the type width — confirm db_count
 * is range-checked at configuration time.
 */
static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
	return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
}

/* Doorbell masking is not implemented; report success unconditionally. */
static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}
1271
/*
 * Program the translation of memory window @idx: point the BAR assigned
 * to the window (via configfs) at the bus address @addr of size @size.
 *
 * Reject out-of-range window indices up front: an unchecked @idx would
 * read past ntb->epf_ntb_bar[] and then index epf->bar[] with whatever
 * value happened to follow the array.
 */
static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
		dma_addr_t addr, resource_size_t size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	int ret;
	struct device *dev;

	dev = &ntb->ntb.dev;

	if (idx < 0 || idx >= ntb->num_mws) {
		dev_err(dev, "invalid memory window index %d\n", idx);
		return -EINVAL;
	}

	barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
	epf_bar = &ntb->epf->bar[barno];
	epf_bar->phys_addr = addr;
	epf_bar->barno = barno;
	epf_bar->size = size;

	ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
	if (ret) {
		dev_err(dev, "failure set mw trans\n");
		return ret;
	}
	return 0;
}
1295
/* Clearing a window translation is a no-op; the BAR stays programmed. */
static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
	return 0;
}

/*
 * Report the local address/size of peer memory window @idx from the
 * values recorded at configuration time.
 * NOTE(review): @idx is not range-checked here; callers are expected to
 * stay below peer_mw_count() — confirm.
 */
static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
		phys_addr_t *base, resource_size_t *size)
{

	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (base)
		*base = ntb->vpci_mw_phy[idx];

	if (size)
		*size = ntb->mws_size[idx];

	return 0;
}

/* Link management is handled elsewhere; enabling is a no-op. */
static int vntb_epf_link_enable(struct ntb_dev *ntb,
			enum ntb_speed max_speed,
			enum ntb_width max_width)
{
	return 0;
}
1322
/*
 * Read local scratchpad @idx.  The local scratchpads live in the shared
 * control register block immediately after the peer scratchpad area,
 * i.e. at spad_offset + spad_count * sizeof(u32).
 * The control block pointer is re-cast to __iomem because the registers
 * are backed by device-visible memory accessed with readl()/writel().
 */
static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
	u32 val;
	void __iomem *base = (void __iomem *)ntb->reg;

	val = readl(base + off + ct + idx * sizeof(u32));
	return val;
}

/* Write local scratchpad @idx (same layout as vntb_epf_spad_read()). */
static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + ct + idx * sizeof(u32));
	return 0;
}
1344
vntb_epf_peer_spad_read(struct ntb_dev * ndev,int pidx,int idx)1345 static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
1346 {
1347 struct epf_ntb *ntb = ntb_ndev(ndev);
1348 struct epf_ntb_ctrl *ctrl = ntb->reg;
1349 int off = ctrl->spad_offset;
1350 void __iomem *base = (void __iomem *)ntb->reg;
1351 u32 val;
1352
1353 val = readl(base + off + idx * sizeof(u32));
1354 return val;
1355 }
1356
vntb_epf_peer_spad_write(struct ntb_dev * ndev,int pidx,int idx,u32 val)1357 static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
1358 {
1359 struct epf_ntb *ntb = ntb_ndev(ndev);
1360 struct epf_ntb_ctrl *ctrl = ntb->reg;
1361 int off = ctrl->spad_offset;
1362 void __iomem *base = (void __iomem *)ntb->reg;
1363
1364 writel(val, base + off + idx * sizeof(u32));
1365 return 0;
1366 }
1367
/*
 * Ring a doorbell on the peer (host) side by raising an MSI interrupt.
 *
 * NOTE(review): ffs() takes an int, so only the low 32 bits of @db_bits
 * are considered; presumably fine since MAX_DB_COUNT is 32 — confirm.
 * NOTE(review): interrupt_num is already ffs(db_bits) + 1 and a further
 * +1 is applied in the pci_epc_raise_irq() call, so doorbell bit 0 maps
 * to MSI vector 3.  This looks like vectors below that are reserved for
 * command/link notification — verify against the peer-side handler.
 */
static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
{
	u32 interrupt_num = ffs(db_bits) + 1;
	struct epf_ntb *ntb = ntb_ndev(ndev);
	u8 func_no, vfunc_no;
	int ret;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
				PCI_IRQ_MSI, interrupt_num + 1);
	if (ret)
		dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");

	return ret;
}
1385
vntb_epf_db_read(struct ntb_dev * ndev)1386 static u64 vntb_epf_db_read(struct ntb_dev *ndev)
1387 {
1388 struct epf_ntb *ntb = ntb_ndev(ndev);
1389
1390 return atomic64_read(&ntb->db);
1391 }
1392
/*
 * Report alignment constraints for memory window @idx: 4 KiB address
 * alignment, byte-granular sizes, maximum equal to the configured
 * window size.  Any output pointer may be NULL.
 * NOTE(review): @idx is not range-checked against num_mws — confirm
 * callers stay within mw_count().
 */
static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
			resource_size_t *addr_align,
			resource_size_t *size_align,
			resource_size_t *size_max)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = ntb->mws_size[idx];

	return 0;
}

/*
 * Report link state from the shared control block's link_status field
 * (LINK_STATUS_UP when up).  @speed and @width are not reported.
 */
static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
			enum ntb_speed *speed,
			enum ntb_width *width)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->reg->link_status;
}

/* Doorbell mask clearing is not implemented; report success. */
static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
{
	return 0;
}

/* Acknowledge doorbells by clearing @db_bits in the local atomic copy. */
static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	atomic64_and(~db_bits, &ntb->db);
	return 0;
}

/* Link management is handled elsewhere; disabling is a no-op. */
static int vntb_epf_link_disable(struct ntb_dev *ntb)
{
	return 0;
}
1438
/* NTB core callbacks implemented by the virtual NTB device. */
static const struct ntb_dev_ops vntb_epf_ops = {
	.mw_count		= vntb_epf_mw_count,
	.spad_count		= vntb_epf_spad_count,
	.peer_mw_count		= vntb_epf_peer_mw_count,
	.db_valid_mask		= vntb_epf_db_valid_mask,
	.db_set_mask		= vntb_epf_db_set_mask,
	.mw_set_trans		= vntb_epf_mw_set_trans,
	.mw_clear_trans		= vntb_epf_mw_clear_trans,
	.peer_mw_get_addr	= vntb_epf_peer_mw_get_addr,
	.link_enable		= vntb_epf_link_enable,
	.spad_read		= vntb_epf_spad_read,
	.spad_write		= vntb_epf_spad_write,
	.peer_spad_read		= vntb_epf_peer_spad_read,
	.peer_spad_write	= vntb_epf_peer_spad_write,
	.peer_db_set		= vntb_epf_peer_db_set,
	.db_read		= vntb_epf_db_read,
	.mw_get_align		= vntb_epf_mw_get_align,
	.link_is_up		= vntb_epf_link_is_up,
	.db_clear_mask		= vntb_epf_db_clear_mask,
	.db_clear		= vntb_epf_db_clear,
	.link_disable		= vntb_epf_link_disable,
};
1461
pci_vntb_probe(struct pci_dev * pdev,const struct pci_device_id * id)1462 static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1463 {
1464 int ret;
1465 struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
1466 struct device *dev = &pdev->dev;
1467
1468 ndev->ntb.pdev = pdev;
1469 ndev->ntb.topo = NTB_TOPO_NONE;
1470 ndev->ntb.ops = &vntb_epf_ops;
1471
1472 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1473 if (ret) {
1474 dev_err(dev, "Cannot set DMA mask\n");
1475 return ret;
1476 }
1477
1478 ret = ntb_register_device(&ndev->ntb);
1479 if (ret) {
1480 dev_err(dev, "Failed to register NTB device\n");
1481 return ret;
1482 }
1483
1484 dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
1485 return 0;
1486 }
1487
/*
 * Match table for the virtual NTB driver.  The placeholder IDs are
 * overwritten in epf_ntb_bind() with the configfs-supplied
 * vntb_vid/vntb_pid before pci_register_driver() runs.
 */
static struct pci_device_id pci_vntb_table[] = {
	{
		PCI_DEVICE(0xffff, 0xffff),
	},
	{},
};

static struct pci_driver vntb_pci_driver = {
	.name = "pci-vntb",
	.id_table = pci_vntb_table,
	.probe = pci_vntb_probe,
};
1500
/* ============ PCIe EPF Driver Bind ====================*/

/**
 * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
 * @epf: NTB endpoint function device
 *
 * Initialize both the endpoint controllers associated with NTB function device.
 * Invoked when a primary interface or secondary interface is bound to EPC
 * device. This function will succeed only when EPC is bound to both the
 * interfaces.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_bind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	int ret;

	/* No controller bound yet: nothing to do until bind is re-invoked. */
	if (!epf->epc) {
		dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
		return 0;
	}

	ret = epf_ntb_init_epc_bar(ntb);
	if (ret) {
		dev_err(dev, "Failed to create NTB EPC\n");
		goto err_bar_init;
	}

	ret = epf_ntb_config_spad_bar_alloc(ntb);
	if (ret) {
		dev_err(dev, "Failed to allocate BAR memory\n");
		goto err_bar_alloc;
	}

	ret = epf_ntb_epc_init(ntb);
	if (ret) {
		dev_err(dev, "Failed to initialize EPC\n");
		goto err_bar_alloc;
	}

	epf_set_drvdata(epf, ntb);

	/*
	 * Patch the emulated config header and the vntb driver's match
	 * table with the configured virtual NTB IDs before registering
	 * the driver and scanning the virtual bus.
	 */
	pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
	pci_vntb_table[0].vendor = ntb->vntb_vid;
	pci_vntb_table[0].device = ntb->vntb_pid;

	ret = pci_register_driver(&vntb_pci_driver);
	if (ret) {
		dev_err(dev, "failure register vntb pci driver\n");
		goto err_epc_cleanup;
	}

	ret = vpci_scan_bus(ntb);
	if (ret)
		goto err_unregister;

	return 0;

	/* Unwind in strict reverse order of the setup steps above. */
err_unregister:
	pci_unregister_driver(&vntb_pci_driver);
err_epc_cleanup:
	epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
	epf_ntb_config_spad_bar_free(ntb);

err_bar_init:
	epf_ntb_epc_destroy(ntb);

	return ret;
}
1573
/**
 * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
 * @epf: NTB endpoint function device
 *
 * Cleanup the initialization from epf_ntb_bind(): tear down the EPC
 * configuration, free the config/scratchpad BAR memory, destroy the EPC
 * association and finally unregister the virtual NTB PCI driver.
 */
static void epf_ntb_unbind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	epf_ntb_epc_cleanup(ntb);
	epf_ntb_config_spad_bar_free(ntb);
	epf_ntb_epc_destroy(ntb);

	pci_unregister_driver(&vntb_pci_driver);
}
1590
/* PCI EPF callbacks: (un)bind handling and configfs directory creation. */
static const struct pci_epf_ops epf_ntb_ops = {
	.bind	= epf_ntb_bind,
	.unbind	= epf_ntb_unbind,
	.add_cfs = epf_ntb_add_cfs,
};
1597
1598 /**
1599 * epf_ntb_probe() - Probe NTB function driver
1600 * @epf: NTB endpoint function device
1601 * @id: NTB endpoint function device ID
1602 *
1603 * Probe NTB function driver when endpoint function bus detects a NTB
1604 * endpoint function.
1605 *
1606 * Returns: Zero for success, or an error code in case of failure
1607 */
epf_ntb_probe(struct pci_epf * epf,const struct pci_epf_device_id * id)1608 static int epf_ntb_probe(struct pci_epf *epf,
1609 const struct pci_epf_device_id *id)
1610 {
1611 struct epf_ntb *ntb;
1612 struct device *dev;
1613 int i;
1614
1615 dev = &epf->dev;
1616
1617 ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
1618 if (!ntb)
1619 return -ENOMEM;
1620
1621 epf->header = &epf_ntb_header;
1622 ntb->epf = epf;
1623 ntb->vbus_number = 0xff;
1624
1625 /* Initially, no bar is assigned */
1626 for (i = 0; i < VNTB_BAR_NUM; i++)
1627 ntb->epf_ntb_bar[i] = NO_BAR;
1628
1629 epf_set_drvdata(epf, ntb);
1630
1631 dev_info(dev, "pci-ep epf driver loaded\n");
1632 return 0;
1633 }
1634
/* EPF device IDs matched by name when a "pci_epf_vntb" function is created. */
static const struct pci_epf_device_id epf_ntb_ids[] = {
	{
		.name = "pci_epf_vntb",
	},
	{},
};

static struct pci_epf_driver epf_ntb_driver = {
	.driver.name	= "pci_epf_vntb",
	.probe		= epf_ntb_probe,
	.id_table	= epf_ntb_ids,
	.ops		= &epf_ntb_ops,
	.owner		= THIS_MODULE,
};
1649
epf_ntb_init(void)1650 static int __init epf_ntb_init(void)
1651 {
1652 int ret;
1653
1654 kpcintb_workqueue = alloc_workqueue("kpcintb",
1655 WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
1656 if (!kpcintb_workqueue) {
1657 pr_err("Failed to allocate kpcintb workqueue\n");
1658 return -ENOMEM;
1659 }
1660
1661 ret = pci_epf_register_driver(&epf_ntb_driver);
1662 if (ret) {
1663 destroy_workqueue(kpcintb_workqueue);
1664 pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
1665 return ret;
1666 }
1667
1668 return 0;
1669 }
1670 module_init(epf_ntb_init);
1671
/* Module exit: unregister the EPF driver, then drop the workqueue. */
static void __exit epf_ntb_exit(void)
{
	pci_epf_unregister_driver(&epf_ntb_driver);
	destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);

MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
MODULE_LICENSE("GPL v2");
1682