1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCI Endpoint *Controller* (EPC) library
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12
13 #include <linux/pci-epc.h>
14 #include <linux/pci-epf.h>
15 #include <linux/pci-ep-cfs.h>
16
/* Device class that every EPC device is registered under (see pci_epc_init()). */
static const struct class pci_epc_class = {
	.name = "pci_epc",
};
20
/* devres release callback: tear down the EPC stashed in the devres data. */
static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc **epc_ptr = res;

	pci_epc_destroy(*epc_ptr);
}
27
28 /**
29 * pci_epc_put() - release the PCI endpoint controller
30 * @epc: epc returned by pci_epc_get()
31 *
32 * release the refcount the caller obtained by invoking pci_epc_get()
33 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	/* Drop the module and device references taken in pci_epc_get(). */
	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);
43
44 /**
45 * pci_epc_get() - get the PCI endpoint controller
46 * @epc_name: device name of the endpoint controller
47 *
48 * Invoke to get struct pci_epc * corresponding to the device name of the
49 * endpoint controller
50 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;

	/* Takes a reference on the device when the name matches. */
	dev = class_find_device_by_name(&pci_epc_class, epc_name);
	if (!dev)
		goto err;

	epc = to_pci_epc(dev);
	/* Pin the controller driver's module for the lifetime of this ref. */
	if (try_module_get(epc->ops->owner))
		return epc;

err:
	/* put_device(NULL) is a no-op, so the not-found path is safe here. */
	put_device(dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
70
71 /**
72 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
73 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
74 *
75 * Invoke to get the first unreserved BAR that can be used by the endpoint
76 * function.
77 */
78 enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features * epc_features)79 pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
80 {
81 return pci_epc_get_next_free_bar(epc_features, BAR_0);
82 }
83 EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
84
85 /**
86 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
87 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
88 * @bar: the starting BAR number from where unreserved BAR should be searched
89 *
90 * Invoke to get the next unreserved BAR starting from @bar that can be used
91 * for endpoint function.
92 */
pci_epc_get_next_free_bar(const struct pci_epc_features * epc_features,enum pci_barno bar)93 enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
94 *epc_features, enum pci_barno bar)
95 {
96 int i;
97
98 if (!epc_features)
99 return BAR_0;
100
101 /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
102 if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
103 bar++;
104
105 for (i = bar; i < PCI_STD_NUM_BARS; i++) {
106 /* If the BAR is not reserved, return it. */
107 if (epc_features->bar[i].type != BAR_RESERVED)
108 return i;
109 }
110
111 return NO_BAR;
112 }
113 EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
114
pci_epc_function_is_valid(struct pci_epc * epc,u8 func_no,u8 vfunc_no)115 static bool pci_epc_function_is_valid(struct pci_epc *epc,
116 u8 func_no, u8 vfunc_no)
117 {
118 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
119 return false;
120
121 if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
122 return false;
123
124 return true;
125 }
126
127 /**
128 * pci_epc_get_features() - get the features supported by EPC
129 * @epc: the features supported by *this* EPC device will be returned
130 * @func_no: the features supported by the EPC device specific to the
131 * endpoint function with func_no will be returned
132 * @vfunc_no: the features supported by the EPC device specific to the
133 * virtual endpoint function with vfunc_no will be returned
134 *
135 * Invoke to get the features provided by the EPC which may be
136 * specific to an endpoint function. Returns pci_epc_features on success
137 * and NULL for any failures.
138 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *features;

	/* Both an invalid function and a missing op yield NULL. */
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
157
158 /**
159 * pci_epc_stop() - stop the PCI link
160 * @epc: the link of the EPC device that has to be stopped
161 *
162 * Invoke to stop the PCI link
163 */
pci_epc_stop(struct pci_epc * epc)164 void pci_epc_stop(struct pci_epc *epc)
165 {
166 if (IS_ERR(epc) || !epc->ops->stop)
167 return;
168
169 mutex_lock(&epc->lock);
170 epc->ops->stop(epc);
171 mutex_unlock(&epc->lock);
172 }
173 EXPORT_SYMBOL_GPL(pci_epc_stop);
174
175 /**
176 * pci_epc_start() - start the PCI link
177 * @epc: the link of *this* EPC device has to be started
178 *
179 * Invoke to start the PCI link
180 */
pci_epc_start(struct pci_epc * epc)181 int pci_epc_start(struct pci_epc *epc)
182 {
183 int ret;
184
185 if (IS_ERR(epc))
186 return -EINVAL;
187
188 if (!epc->ops->start)
189 return 0;
190
191 mutex_lock(&epc->lock);
192 ret = epc->ops->start(epc);
193 mutex_unlock(&epc->lock);
194
195 return ret;
196 }
197 EXPORT_SYMBOL_GPL(pci_epc_start);
198
199 /**
200 * pci_epc_raise_irq() - interrupt the host system
201 * @epc: the EPC device which has to interrupt the host
202 * @func_no: the physical endpoint function number in the EPC device
203 * @vfunc_no: the virtual endpoint function number in the physical function
204 * @type: specify the type of interrupt; INTX, MSI or MSI-X
205 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
206 *
207 * Invoke to raise an INTX, MSI or MSI-X interrupt
208 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int err;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* Nothing to do (and no error) if the controller cannot raise IRQs. */
	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	err = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
227
228 /**
229 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
230 * MSI data
231 * @epc: the EPC device which has the MSI capability
232 * @func_no: the physical endpoint function number in the EPC device
233 * @vfunc_no: the virtual endpoint function number in the physical function
234 * @phys_addr: the physical address of the outbound region
235 * @interrupt_num: the MSI interrupt number with range (1-N)
236 * @entry_size: Size of Outbound address region for each interrupt
237 * @msi_data: the data that should be written in order to raise MSI interrupt
238 * with interrupt number as 'interrupt num'
239 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
240 * to which the MSI address is mapped
241 *
242 * Invoke to map physical address to MSI address and return MSI data. The
243 * physical address should be an address in the outbound region. This is
244 * required to implement doorbell functionality of NTB wherein EPC on either
245 * side of the interface (primary and secondary) can directly write to the
246 * physical address (in outbound region) of the other interface to ring
247 * doorbell.
248 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int err;

	/* Unlike most ops, a missing map_msi_irq is an error, not a no-op. */
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	err = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
270
271 /**
272 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
274 * @func_no: the physical endpoint function number in the EPC device
275 * @vfunc_no: the virtual endpoint function number in the physical function
276 *
277 * Invoke to get the number of MSI interrupts allocated by the RC
278 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int log2_irqs;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	log2_irqs = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	/*
	 * Negative means no MSI vectors are available; otherwise the op
	 * reports the power-of-two exponent (the encoding written by
	 * pci_epc_set_msi()), so decode it back into a vector count.
	 */
	if (log2_irqs < 0)
		return 0;

	return 1 << log2_irqs;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);
301
302 /**
303 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
304 * @epc: the EPC device on which MSI has to be configured
305 * @func_no: the physical endpoint function number in the EPC device
306 * @vfunc_no: the virtual endpoint function number in the physical function
307 * @interrupts: number of MSI interrupts required by the EPF
308 *
309 * Invoke to set the required number of MSI interrupts.
310 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	u8 log2_irqs;
	int err;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* MSI allows between 1 and 32 vectors. */
	if (interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	/* The controller expects the vector count as a power-of-two exponent. */
	log2_irqs = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	err = epc->ops->set_msi(epc, func_no, vfunc_no, log2_irqs);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
334
335 /**
336 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
338 * @func_no: the physical endpoint function number in the EPC device
339 * @vfunc_no: the virtual endpoint function number in the physical function
340 *
341 * Invoke to get the number of MSI-X interrupts allocated by the RC
342 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int table_size;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	table_size = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	/*
	 * A negative value is treated as "no MSI-X vectors"; otherwise the op
	 * uses the N - 1 encoding set by pci_epc_set_msix(), so undo it.
	 */
	if (table_size < 0)
		return 0;

	return table_size + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
363
364 /**
365 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
366 * @epc: the EPC device on which MSI-X has to be configured
367 * @func_no: the physical endpoint function number in the EPC device
368 * @vfunc_no: the virtual endpoint function number in the physical function
369 * @interrupts: number of MSI-X interrupts required by the EPF
370 * @bir: BAR where the MSI-X table resides
371 * @offset: Offset pointing to the start of MSI-X table
372 *
373 * Invoke to set the required number of MSI-X interrupts.
374 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int err;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* MSI-X supports between 1 and 2048 table entries. */
	if (interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	/* The op takes the table size in the N - 1 encoding. */
	mutex_lock(&epc->lock);
	err = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
397
398 /**
399 * pci_epc_unmap_addr() - unmap CPU address from PCI address
400 * @epc: the EPC device on which address is allocated
401 * @func_no: the physical endpoint function number in the EPC device
402 * @vfunc_no: the virtual endpoint function number in the physical function
403 * @phys_addr: physical address of the local system
404 *
405 * Invoke to unmap the CPU address from PCI address.
406 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	/* Silently ignore invalid functions and controllers without the op. */
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
421
422 /**
423 * pci_epc_map_addr() - map CPU address to PCI address
424 * @epc: the EPC device on which address is allocated
425 * @func_no: the physical endpoint function number in the EPC device
426 * @vfunc_no: the virtual endpoint function number in the physical function
427 * @phys_addr: physical address of the local system
428 * @pci_addr: PCI address to which the physical address should be mapped
429 * @size: the size of the allocation
430 *
431 * Invoke to map CPU address with PCI address.
432 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int err;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* A controller without a map_addr op is treated as a no-op success. */
	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	err = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
452
453 /**
454 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
455 * @epc: the EPC device on which the CPU address is to be allocated and mapped
456 * @func_no: the physical endpoint function number in the EPC device
457 * @vfunc_no: the virtual endpoint function number in the physical function
458 * @pci_addr: PCI address to which the CPU address should be mapped
459 * @pci_size: the number of bytes to map starting from @pci_addr
460 * @map: where to return the mapping information
461 *
462 * Allocate a controller memory address region and map it to a RC PCI address
463 * region, taking into account the controller physical address mapping
464 * constraints using the controller operation align_addr(). If this operation is
465 * not defined, we assume that there are no alignment constraints for the
466 * mapping.
467 *
468 * The effective size of the PCI address range mapped from @pci_addr is
469 * indicated by @map->pci_size. This size may be less than the requested
470 * @pci_size. The local virtual CPU address for the mapping is indicated by
471 * @map->virt_addr (@map->phys_addr indicates the physical address).
472 * The size and CPU address of the controller memory allocated and mapped are
473 * respectively indicated by @map->map_size and @map->virt_base (and
474 * @map->phys_base for the physical address of @map->virt_base).
475 *
476 * Returns 0 on success and a negative error code in case of error.
477 */
int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
{
	size_t map_size = pci_size;
	size_t map_offset = 0;
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!pci_size || !map)
		return -EINVAL;

	/*
	 * Align the PCI address to map. If the controller defines the
	 * .align_addr() operation, use it to determine the PCI address to map
	 * and the size of the mapping. Otherwise, assume that the controller
	 * has no alignment constraint.
	 */
	memset(map, 0, sizeof(*map));
	map->pci_addr = pci_addr;
	if (epc->ops->align_addr)
		map->map_pci_addr =
			epc->ops->align_addr(epc, pci_addr,
					     &map_size, &map_offset);
	else
		map->map_pci_addr = pci_addr;
	map->map_size = map_size;
	/*
	 * The aligned window may not cover the entire requested
	 * [pci_addr, pci_addr + pci_size) span: report the effective number
	 * of usable bytes starting at pci_addr in map->pci_size.
	 */
	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
	else
		map->pci_size = pci_size;

	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
						map->map_size);
	if (!map->virt_base)
		return -ENOMEM;

	/* Addresses usable by the caller: offset into the aligned window. */
	map->phys_addr = map->phys_base + map_offset;
	map->virt_addr = map->virt_base + map_offset;

	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
			       map->map_pci_addr, map->map_size);
	if (ret) {
		/* Undo the controller memory allocation on mapping failure. */
		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
				      map->map_size);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_map);
530
531 /**
532 * pci_epc_mem_unmap() - unmap and free a CPU address region
533 * @epc: the EPC device on which the CPU address is allocated and mapped
534 * @func_no: the physical endpoint function number in the EPC device
535 * @vfunc_no: the virtual endpoint function number in the physical function
536 * @map: the mapping information
537 *
538 * Unmap and free a CPU address region that was allocated and mapped with
539 * pci_epc_mem_map().
540 */
void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epc_map *map)
{
	/* Ignore invalid callers and mappings that were never established. */
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !map || !map->virt_base)
		return;

	/* Reverse of pci_epc_mem_map(): unmap first, then free the memory. */
	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
			      map->map_size);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
555
556 /**
557 * pci_epc_clear_bar() - reset the BAR
558 * @epc: the EPC device for which the BAR has to be cleared
559 * @func_no: the physical endpoint function number in the EPC device
560 * @vfunc_no: the virtual endpoint function number in the physical function
561 * @epf_bar: the struct epf_bar that contains the BAR information
562 *
563 * Invoke to reset the BAR of the endpoint device.
564 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no) ||
	    !epc->ops->clear_bar)
		return;

	/* A 64-bit BAR cannot start at BAR_5: there is no upper half slot. */
	if (epf_bar->barno == BAR_5 &&
	    (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
583
584 /**
585 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
586 * @epc: the EPC device on which BAR has to be configured
587 * @func_no: the physical endpoint function number in the EPC device
588 * @vfunc_no: the virtual endpoint function number in the physical function
589 * @epf_bar: the struct epf_bar that contains the BAR information
590 *
591 * Invoke to configure the BAR of the endpoint device.
592 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	int ret;

	/* Also validates epc/func_no/vfunc_no via pci_epc_get_features(). */
	epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
	if (!epc_features)
		return -EINVAL;

	/* Resizable BARs must be between 1 MB and 128 TB. */
	if (epc_features->bar[bar].type == BAR_RESIZABLE &&
	    (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
		return -EINVAL;

	/* Fixed BARs must be set with exactly the advertised size. */
	if (epc_features->bar[bar].type == BAR_FIXED &&
	    (epc_features->bar[bar].fixed_size != epf_bar->size))
		return -EINVAL;

	/* BAR sizes must be powers of two. */
	if (!is_power_of_2(epf_bar->size))
		return -EINVAL;

	/*
	 * Reject: a 64-bit BAR starting at BAR_5 (no upper-half slot), an
	 * I/O BAR with address bits set in the I/O mask, and a size above
	 * 4 GB without the 64-bit flag.
	 */
	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
633
634 /**
635 * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
636 * by the Resizable BAR Capability Register
637 * @size: the size to convert
638 * @cap: where to store the result
639 *
640 * Returns 0 on success and a negative error code in case of error.
641 */
int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
{
	u32 order;

	/*
	 * Per PCIe r6.0, sec 7.8.6.2, a resizable BAR spans 1 MB up to
	 * 128 TB; reject anything outside that range.
	 */
	if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
		return -EINVAL;

	/*
	 * Encode as log2 of the size in MB units; the size bits in
	 * REBAR_CAP start at BIT(4).
	 */
	order = ilog2(size) - ilog2(SZ_1M);
	*cap = BIT(order + 4);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
660
661 /**
662 * pci_epc_write_header() - write standard configuration header
663 * @epc: the EPC device to which the configuration header should be written
664 * @func_no: the physical endpoint function number in the EPC device
665 * @vfunc_no: the virtual endpoint function number in the physical function
666 * @header: standard configuration header fields
667 *
668 * Invoke to write the configuration header to the endpoint controller. Every
669 * endpoint controller will have a dedicated location to which the standard
670 * configuration header would be written. The callback function should write
671 * the header fields to this dedicated location.
672 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int err;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	/* No dedicated header location on this controller: nothing to do. */
	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	err = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return err;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
695
696 /**
697 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
698 * @epc: the EPC device to which the endpoint function should be added
699 * @epf: the endpoint function to be added
700 * @type: Identifies if the EPC is connected to the primary or secondary
701 * interface of EPF
702 *
703 * A PCI endpoint device can have one or more functions. In the case of PCIe,
704 * the specification allows up to 8 PCIe endpoint functions. Invoke
705 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
706 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	/* Virtual functions are managed separately and cannot be added here. */
	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	/* An EPF may be bound to at most one EPC per interface. */
	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	/* function_num_map is a bitmap of function numbers already in use. */
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	/*
	 * NOTE(review): if max_functions were 0, 'max_functions - 1' wraps in
	 * the promoted comparison -- assumed nonzero by the time EPFs are
	 * added; confirm against controller probe paths.
	 */
	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	/* Claim the function number and record the binding per interface. */
	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
755
756 /**
757 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
758 * @epc: the EPC device from which the endpoint function should be removed
759 * @epf: the endpoint function to be removed
760 * @type: identifies if the EPC is connected to the primary or secondary
761 * interface of EPF
762 *
763 * Invoke to remove PCI endpoint function from the endpoint controller.
764 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	mutex_lock(&epc->list_lock);
	/* Undo the per-interface bookkeeping done in pci_epc_add_epf(). */
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}
	/* Release the function number and drop the EPF from the EPC's list. */
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
789
790 /**
791 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
792 * connection with the Root Complex.
793 * @epc: the EPC device which has established link with the host
794 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
797 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * Lock order: epc->list_lock protects the EPF list, then each
	 * epf->lock serializes the callback against the EPF's own state.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
815
816 /**
817 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
818 * connection with the Root Complex.
819 * @epc: the EPC device which has dropped the link with the host
820 *
821 * Invoke to Notify the EPF device that the EPC device has dropped the
822 * connection with the Root Complex.
823 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/* Same lock order as pci_epc_linkup(): list_lock, then epf->lock. */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
841
842 /**
843 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
844 * is completed.
845 * @epc: the EPC device whose initialization is completed
846 *
847 * Invoke to Notify the EPF device that the EPC device's initialization
848 * is completed.
849 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	/*
	 * Record completion (under list_lock) so EPFs that bind later still
	 * receive the notification via pci_epc_notify_pending_init().
	 */
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
868
869 /**
870 * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
871 * complete to the EPF device
872 * @epc: the EPC device whose initialization is pending to be notified
873 * @epf: the EPF device to be notified
874 *
875 * Invoke to notify the pending EPC device initialization complete to the EPF
876 * device. This is used to deliver the notification if the EPC initialization
877 * got completed before the EPF driver bind.
878 */
pci_epc_notify_pending_init(struct pci_epc * epc,struct pci_epf * epf)879 void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
880 {
881 if (epc->init_complete) {
882 mutex_lock(&epf->lock);
883 if (epf->event_ops && epf->event_ops->epc_init)
884 epf->event_ops->epc_init(epf);
885 mutex_unlock(&epf->lock);
886 }
887 }
888 EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
889
890 /**
891 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
892 * @epc: the EPC device whose deinitialization is completed
893 *
894 * Invoke to notify the EPF device that the EPC deinitialization is completed.
895 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	/* Clear the flag so future binds no longer see a pending init. */
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
914
915 /**
916 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
917 * device has received the Bus Master
918 * Enable event from the Root complex
919 * @epc: the EPC device that received the Bus Master Enable event
920 *
921 * Notify the EPF device that the EPC device has generated the Bus Master Enable
922 * event due to host setting the Bus Master Enable bit in the Command register.
923 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/* Same lock order as the other notifiers: list_lock, then epf->lock. */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
941
942 /**
943 * pci_epc_destroy() - destroy the EPC device
944 * @epc: the EPC device that has to be destroyed
945 *
946 * Invoke to destroy the PCI EPC device
947 */
void pci_epc_destroy(struct pci_epc *epc)
{
	/* Remove the configfs group created in __pci_epc_create(). */
	pci_ep_cfs_remove_epc_group(epc->group);
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	/* Return the PCI domain number allocated in __pci_epc_create(). */
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif
	/* Final put frees epc via pci_epc_release() once all refs drop. */
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
957
/* Device release callback: frees the EPC allocated in __pci_epc_create(). */
static void pci_epc_release(struct device *dev)
{
	struct pci_epc *epc = to_pci_epc(dev);

	kfree(epc);
}
962
963 /**
964 * __pci_epc_create() - create a new endpoint controller (EPC) device
965 * @dev: device that is creating the new EPC
966 * @ops: function pointers for performing EPC operations
967 * @owner: the owner of the module that creates the EPC device
968 *
969 * Invoke to create a new EPC device and add it to pci_epc class.
970 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	/* Freed by pci_epc_release() when the last device reference drops. */
	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	/*
	 * NOTE(review): @owner is not stored here; module pinning happens via
	 * epc->ops->owner in pci_epc_get() -- confirm @owner is intentionally
	 * unused.
	 */
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	/* Released again in pci_epc_destroy(). */
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/* Drops the initial reference; pci_epc_release() then frees epc. */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
1028
1029 /**
1030 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
1031 * @dev: device that is creating the new EPC
1032 * @ops: function pointers for performing EPC operations
1033 * @owner: the owner of the module that creates the EPC device
1034 *
1035 * Invoke to create a new EPC device and add it to pci_epc class.
1036 * While at that, it also associates the device with the pci_epc using devres.
1037 * On driver detach, release function is invoked on the devres data,
1038 * then, devres data is freed.
1039 */
1040 struct pci_epc *
__devm_pci_epc_create(struct device * dev,const struct pci_epc_ops * ops,struct module * owner)1041 __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
1042 struct module *owner)
1043 {
1044 struct pci_epc **ptr, *epc;
1045
1046 ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
1047 if (!ptr)
1048 return ERR_PTR(-ENOMEM);
1049
1050 epc = __pci_epc_create(dev, ops, owner);
1051 if (!IS_ERR(epc)) {
1052 *ptr = epc;
1053 devres_add(dev, ptr);
1054 } else {
1055 devres_free(ptr);
1056 }
1057
1058 return epc;
1059 }
1060 EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
1061
pci_epc_init(void)1062 static int __init pci_epc_init(void)
1063 {
1064 return class_register(&pci_epc_class);
1065 }
1066 module_init(pci_epc_init);
1067
pci_epc_exit(void)1068 static void __exit pci_epc_exit(void)
1069 {
1070 class_unregister(&pci_epc_class);
1071 }
1072 module_exit(pci_epc_exit);
1073
1074 MODULE_DESCRIPTION("PCI EPC Library");
1075 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1076