1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Endpoint *Controller* (EPC) library
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 
13 #include <linux/pci-epc.h>
14 #include <linux/pci-epf.h>
15 #include <linux/pci-ep-cfs.h>
16 
/* Device class under which all EPC devices are registered (/sys/class/pci_epc). */
static const struct class pci_epc_class = {
	.name = "pci_epc",
};
20 
/*
 * devm_pci_epc_release() - devres release callback for __devm_pci_epc_create()
 * @dev: device owning the devres entry (unused here)
 * @res: devres data holding a pointer to the struct pci_epc to destroy
 *
 * Invoked automatically on driver detach; tears down the managed EPC device
 * via pci_epc_destroy().
 */
static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}
27 
/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	/* Drop the references taken in pci_epc_get(): module, then device. */
	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);
43 
/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 *
 * Returns the struct pci_epc * on success (with a device reference and a
 * module reference held; release both with pci_epc_put()), or an ERR_PTR()
 * on failure.
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;

	/* Takes a device reference; dropped in pci_epc_put() on success. */
	dev = class_find_device_by_name(&pci_epc_class, epc_name);
	if (!dev)
		goto err;

	epc = to_pci_epc(dev);
	/* Pin the controller driver's module while the EPC is in use. */
	if (try_module_get(epc->ops->owner))
		return epc;

err:
	put_device(dev);	/* put_device() is a no-op for a NULL dev */
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
70 
/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 *
 * Returns the first free BAR starting from BAR_0, or NO_BAR if none is
 * available (see pci_epc_get_next_free_bar()).
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
84 
85 /**
86  * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
87  * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
88  * @bar: the starting BAR number from where unreserved BAR should be searched
89  *
90  * Invoke to get the next unreserved BAR starting from @bar that can be used
91  * for endpoint function.
92  */
93 enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
94 					 *epc_features, enum pci_barno bar)
95 {
96 	int i;
97 
98 	if (!epc_features)
99 		return BAR_0;
100 
101 	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
102 	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
103 		bar++;
104 
105 	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
106 		/* If the BAR is not reserved, return it. */
107 		if (epc_features->bar[i].type != BAR_RESERVED)
108 			return i;
109 	}
110 
111 	return NO_BAR;
112 }
113 EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
114 
/*
 * pci_epc_function_is_valid() - sanity-check an (epc, func_no, vfunc_no) tuple
 *
 * Returns true only when @epc is a real pointer, @func_no is below the
 * controller's function limit, and @vfunc_no (when non-zero) is within the
 * per-physical-function virtual function limit.
 */
static bool pci_epc_function_is_valid(struct pci_epc *epc,
				      u8 func_no, u8 vfunc_no)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return false;

	/* vfunc_no == 0 addresses the physical function itself. */
	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return false;

	return true;
}
126 
127 /**
128  * pci_epc_get_features() - get the features supported by EPC
129  * @epc: the features supported by *this* EPC device will be returned
130  * @func_no: the features supported by the EPC device specific to the
131  *	     endpoint function with func_no will be returned
132  * @vfunc_no: the features supported by the EPC device specific to the
133  *	     virtual endpoint function with vfunc_no will be returned
134  *
135  * Invoke to get the features provided by the EPC which may be
136  * specific to an endpoint function. Returns pci_epc_features on success
137  * and NULL for any failures.
138  */
139 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
140 						    u8 func_no, u8 vfunc_no)
141 {
142 	const struct pci_epc_features *epc_features;
143 
144 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
145 		return NULL;
146 
147 	if (!epc->ops->get_features)
148 		return NULL;
149 
150 	mutex_lock(&epc->lock);
151 	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
152 	mutex_unlock(&epc->lock);
153 
154 	return epc_features;
155 }
156 EXPORT_SYMBOL_GPL(pci_epc_get_features);
157 
158 /**
159  * pci_epc_stop() - stop the PCI link
160  * @epc: the link of the EPC device that has to be stopped
161  *
162  * Invoke to stop the PCI link
163  */
164 void pci_epc_stop(struct pci_epc *epc)
165 {
166 	if (IS_ERR(epc) || !epc->ops->stop)
167 		return;
168 
169 	mutex_lock(&epc->lock);
170 	epc->ops->stop(epc);
171 	mutex_unlock(&epc->lock);
172 }
173 EXPORT_SYMBOL_GPL(pci_epc_stop);
174 
175 /**
176  * pci_epc_start() - start the PCI link
177  * @epc: the link of *this* EPC device has to be started
178  *
179  * Invoke to start the PCI link
180  */
181 int pci_epc_start(struct pci_epc *epc)
182 {
183 	int ret;
184 
185 	if (IS_ERR(epc))
186 		return -EINVAL;
187 
188 	if (!epc->ops->start)
189 		return 0;
190 
191 	mutex_lock(&epc->lock);
192 	ret = epc->ops->start(epc);
193 	mutex_unlock(&epc->lock);
194 
195 	return ret;
196 }
197 EXPORT_SYMBOL_GPL(pci_epc_start);
198 
199 /**
200  * pci_epc_raise_irq() - interrupt the host system
201  * @epc: the EPC device which has to interrupt the host
202  * @func_no: the physical endpoint function number in the EPC device
203  * @vfunc_no: the virtual endpoint function number in the physical function
204  * @type: specify the type of interrupt; INTX, MSI or MSI-X
205  * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
206  *
207  * Invoke to raise an INTX, MSI or MSI-X interrupt
208  */
209 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
210 		      unsigned int type, u16 interrupt_num)
211 {
212 	int ret;
213 
214 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
215 		return -EINVAL;
216 
217 	if (!epc->ops->raise_irq)
218 		return 0;
219 
220 	mutex_lock(&epc->lock);
221 	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
222 	mutex_unlock(&epc->lock);
223 
224 	return ret;
225 }
226 EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
227 
228 /**
229  * pci_epc_map_msi_irq() - Map physical address to MSI address and return
230  *                         MSI data
231  * @epc: the EPC device which has the MSI capability
232  * @func_no: the physical endpoint function number in the EPC device
233  * @vfunc_no: the virtual endpoint function number in the physical function
234  * @phys_addr: the physical address of the outbound region
235  * @interrupt_num: the MSI interrupt number with range (1-N)
236  * @entry_size: Size of Outbound address region for each interrupt
237  * @msi_data: the data that should be written in order to raise MSI interrupt
238  *            with interrupt number as 'interrupt num'
239  * @msi_addr_offset: Offset of MSI address from the aligned outbound address
240  *                   to which the MSI address is mapped
241  *
242  * Invoke to map physical address to MSI address and return MSI data. The
243  * physical address should be an address in the outbound region. This is
244  * required to implement doorbell functionality of NTB wherein EPC on either
245  * side of the interface (primary and secondary) can directly write to the
246  * physical address (in outbound region) of the other interface to ring
247  * doorbell.
248  */
249 int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
250 			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
251 			u32 *msi_data, u32 *msi_addr_offset)
252 {
253 	int ret;
254 
255 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
256 		return -EINVAL;
257 
258 	if (!epc->ops->map_msi_irq)
259 		return -EINVAL;
260 
261 	mutex_lock(&epc->lock);
262 	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
263 				    interrupt_num, entry_size, msi_data,
264 				    msi_addr_offset);
265 	mutex_unlock(&epc->lock);
266 
267 	return ret;
268 }
269 EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
270 
271 /**
272  * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
273  * @epc: the EPC device to which MSI interrupts was requested
274  * @func_no: the physical endpoint function number in the EPC device
275  * @vfunc_no: the virtual endpoint function number in the physical function
276  *
277  * Invoke to get the number of MSI interrupts allocated by the RC
278  */
279 int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
280 {
281 	int interrupt;
282 
283 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
284 		return 0;
285 
286 	if (!epc->ops->get_msi)
287 		return 0;
288 
289 	mutex_lock(&epc->lock);
290 	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
291 	mutex_unlock(&epc->lock);
292 
293 	if (interrupt < 0)
294 		return 0;
295 
296 	return interrupt;
297 }
298 EXPORT_SYMBOL_GPL(pci_epc_get_msi);
299 
300 /**
301  * pci_epc_set_msi() - set the number of MSI interrupt numbers required
302  * @epc: the EPC device on which MSI has to be configured
303  * @func_no: the physical endpoint function number in the EPC device
304  * @vfunc_no: the virtual endpoint function number in the physical function
305  * @nr_irqs: number of MSI interrupts required by the EPF
306  *
307  * Invoke to set the required number of MSI interrupts.
308  */
309 int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs)
310 {
311 	int ret;
312 
313 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
314 		return -EINVAL;
315 
316 	if (nr_irqs < 1 || nr_irqs > 32)
317 		return -EINVAL;
318 
319 	if (!epc->ops->set_msi)
320 		return 0;
321 
322 	mutex_lock(&epc->lock);
323 	ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs);
324 	mutex_unlock(&epc->lock);
325 
326 	return ret;
327 }
328 EXPORT_SYMBOL_GPL(pci_epc_set_msi);
329 
330 /**
331  * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
332  * @epc: the EPC device to which MSI-X interrupts was requested
333  * @func_no: the physical endpoint function number in the EPC device
334  * @vfunc_no: the virtual endpoint function number in the physical function
335  *
336  * Invoke to get the number of MSI-X interrupts allocated by the RC
337  */
338 int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
339 {
340 	int interrupt;
341 
342 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
343 		return 0;
344 
345 	if (!epc->ops->get_msix)
346 		return 0;
347 
348 	mutex_lock(&epc->lock);
349 	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
350 	mutex_unlock(&epc->lock);
351 
352 	if (interrupt < 0)
353 		return 0;
354 
355 	return interrupt;
356 }
357 EXPORT_SYMBOL_GPL(pci_epc_get_msix);
358 
359 /**
360  * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
361  * @epc: the EPC device on which MSI-X has to be configured
362  * @func_no: the physical endpoint function number in the EPC device
363  * @vfunc_no: the virtual endpoint function number in the physical function
364  * @nr_irqs: number of MSI-X interrupts required by the EPF
365  * @bir: BAR where the MSI-X table resides
366  * @offset: Offset pointing to the start of MSI-X table
367  *
368  * Invoke to set the required number of MSI-X interrupts.
369  */
370 int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
371 		     enum pci_barno bir, u32 offset)
372 {
373 	int ret;
374 
375 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
376 		return -EINVAL;
377 
378 	if (nr_irqs < 1 || nr_irqs > 2048)
379 		return -EINVAL;
380 
381 	if (!epc->ops->set_msix)
382 		return 0;
383 
384 	mutex_lock(&epc->lock);
385 	ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset);
386 	mutex_unlock(&epc->lock);
387 
388 	return ret;
389 }
390 EXPORT_SYMBOL_GPL(pci_epc_set_msix);
391 
392 /**
393  * pci_epc_unmap_addr() - unmap CPU address from PCI address
394  * @epc: the EPC device on which address is allocated
395  * @func_no: the physical endpoint function number in the EPC device
396  * @vfunc_no: the virtual endpoint function number in the physical function
397  * @phys_addr: physical address of the local system
398  *
399  * Invoke to unmap the CPU address from PCI address.
400  */
401 void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
402 			phys_addr_t phys_addr)
403 {
404 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
405 		return;
406 
407 	if (!epc->ops->unmap_addr)
408 		return;
409 
410 	mutex_lock(&epc->lock);
411 	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
412 	mutex_unlock(&epc->lock);
413 }
414 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
415 
416 /**
417  * pci_epc_map_addr() - map CPU address to PCI address
418  * @epc: the EPC device on which address is allocated
419  * @func_no: the physical endpoint function number in the EPC device
420  * @vfunc_no: the virtual endpoint function number in the physical function
421  * @phys_addr: physical address of the local system
422  * @pci_addr: PCI address to which the physical address should be mapped
423  * @size: the size of the allocation
424  *
425  * Invoke to map CPU address with PCI address.
426  */
427 int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
428 		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
429 {
430 	int ret;
431 
432 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
433 		return -EINVAL;
434 
435 	if (!epc->ops->map_addr)
436 		return 0;
437 
438 	mutex_lock(&epc->lock);
439 	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
440 				 size);
441 	mutex_unlock(&epc->lock);
442 
443 	return ret;
444 }
445 EXPORT_SYMBOL_GPL(pci_epc_map_addr);
446 
/**
 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
 * @epc: the EPC device on which the CPU address is to be allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @pci_addr: PCI address to which the CPU address should be mapped
 * @pci_size: the number of bytes to map starting from @pci_addr
 * @map: where to return the mapping information
 *
 * Allocate a controller memory address region and map it to a RC PCI address
 * region, taking into account the controller physical address mapping
 * constraints using the controller operation align_addr(). If this operation is
 * not defined, we assume that there are no alignment constraints for the
 * mapping.
 *
 * The effective size of the PCI address range mapped from @pci_addr is
 * indicated by @map->pci_size. This size may be less than the requested
 * @pci_size. The local virtual CPU address for the mapping is indicated by
 * @map->virt_addr (@map->phys_addr indicates the physical address).
 * The size and CPU address of the controller memory allocated and mapped are
 * respectively indicated by @map->map_size and @map->virt_base (and
 * @map->phys_base for the physical address of @map->virt_base).
 *
 * Returns 0 on success and a negative error code in case of error.
 */
int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
{
	size_t map_size = pci_size;
	size_t map_offset = 0;
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!pci_size || !map)
		return -EINVAL;

	/*
	 * Align the PCI address to map. If the controller defines the
	 * .align_addr() operation, use it to determine the PCI address to map
	 * and the size of the mapping. Otherwise, assume that the controller
	 * has no alignment constraint.
	 */
	memset(map, 0, sizeof(*map));
	map->pci_addr = pci_addr;
	if (epc->ops->align_addr)
		map->map_pci_addr =
			epc->ops->align_addr(epc, pci_addr,
					     &map_size, &map_offset);
	else
		map->map_pci_addr = pci_addr;
	map->map_size = map_size;
	/*
	 * The (possibly aligned and resized) mapping may not cover all of the
	 * requested [pci_addr, pci_addr + pci_size) range: clamp the effective
	 * size reported to the caller to what the mapping actually covers.
	 */
	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
	else
		map->pci_size = pci_size;

	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
						map->map_size);
	if (!map->virt_base)
		return -ENOMEM;

	/* The caller-visible addresses sit @map_offset into the mapping. */
	map->phys_addr = map->phys_base + map_offset;
	map->virt_addr = map->virt_base + map_offset;

	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
			       map->map_pci_addr, map->map_size);
	if (ret) {
		/* Release the controller memory allocated above. */
		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
				      map->map_size);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_map);
524 
525 /**
526  * pci_epc_mem_unmap() - unmap and free a CPU address region
527  * @epc: the EPC device on which the CPU address is allocated and mapped
528  * @func_no: the physical endpoint function number in the EPC device
529  * @vfunc_no: the virtual endpoint function number in the physical function
530  * @map: the mapping information
531  *
532  * Unmap and free a CPU address region that was allocated and mapped with
533  * pci_epc_mem_map().
534  */
535 void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
536 		       struct pci_epc_map *map)
537 {
538 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
539 		return;
540 
541 	if (!map || !map->virt_base)
542 		return;
543 
544 	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
545 	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
546 			      map->map_size);
547 }
548 EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
549 
/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	/*
	 * A 64-bit BAR consumes two BAR slots, so it can never legitimately
	 * start at BAR_5 (the last slot); reject such a combination.
	 */
	if (epf_bar->barno == BAR_5 &&
	    epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
577 
/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 *
 * Returns 0 on success (including when the controller implements no set_bar
 * operation) and a negative error code for an invalid BAR configuration.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	int ret;

	/* Also validates (epc, func_no, vfunc_no), returning NULL when invalid. */
	epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
	if (!epc_features)
		return -EINVAL;

	/* A resizable BAR must be between 1 MB and 128 TB (cf. rebar_cap below). */
	if (epc_features->bar[bar].type == BAR_RESIZABLE &&
	    (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
		return -EINVAL;

	/* A fixed-size BAR must be requested with exactly its fixed size. */
	if (epc_features->bar[bar].type == BAR_FIXED &&
	    (epc_features->bar[bar].fixed_size != epf_bar->size))
		return -EINVAL;

	if (!is_power_of_2(epf_bar->size))
		return -EINVAL;

	/*
	 * Reject: a 64-bit BAR starting in the last slot (needs two slots),
	 * an I/O BAR with address bits set in the I/O mask, and a >4 GB size
	 * on a BAR that is not 64-bit.
	 */
	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
627 
628 /**
629  * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
630  *				     by the Resizable BAR Capability Register
631  * @size: the size to convert
632  * @cap: where to store the result
633  *
634  * Returns 0 on success and a negative error code in case of error.
635  */
636 int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
637 {
638 	/*
639 	 * As per PCIe r6.0, sec 7.8.6.2, min size for a resizable BAR is 1 MB,
640 	 * thus disallow a requested BAR size smaller than 1 MB.
641 	 * Disallow a requested BAR size larger than 128 TB.
642 	 */
643 	if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
644 		return -EINVAL;
645 
646 	*cap = ilog2(size) - ilog2(SZ_1M);
647 
648 	/* Sizes in REBAR_CAP start at BIT(4). */
649 	*cap = BIT(*cap + 4);
650 
651 	return 0;
652 }
653 EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
654 
655 /**
656  * pci_epc_write_header() - write standard configuration header
657  * @epc: the EPC device to which the configuration header should be written
658  * @func_no: the physical endpoint function number in the EPC device
659  * @vfunc_no: the virtual endpoint function number in the physical function
660  * @header: standard configuration header fields
661  *
662  * Invoke to write the configuration header to the endpoint controller. Every
663  * endpoint controller will have a dedicated location to which the standard
664  * configuration header would be written. The callback function should write
665  * the header fields to this dedicated location.
666  */
667 int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
668 			 struct pci_epf_header *header)
669 {
670 	int ret;
671 
672 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
673 		return -EINVAL;
674 
675 	/* Only Virtual Function #1 has deviceID */
676 	if (vfunc_no > 1)
677 		return -EINVAL;
678 
679 	if (!epc->ops->write_header)
680 		return 0;
681 
682 	mutex_lock(&epc->lock);
683 	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
684 	mutex_unlock(&epc->lock);
685 
686 	return ret;
687 }
688 EXPORT_SYMBOL_GPL(pci_epc_write_header);
689 
690 /**
691  * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
692  * @epc: the EPC device to which the endpoint function should be added
693  * @epf: the endpoint function to be added
694  * @type: Identifies if the EPC is connected to the primary or secondary
695  *        interface of EPF
696  *
697  * A PCI endpoint device can have one or more functions. In the case of PCIe,
698  * the specification allows up to 8 PCIe endpoint functions. Invoke
699  * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
700  */
701 int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
702 		    enum pci_epc_interface_type type)
703 {
704 	struct list_head *list;
705 	u32 func_no;
706 	int ret = 0;
707 
708 	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
709 		return -EINVAL;
710 
711 	if (type == PRIMARY_INTERFACE && epf->epc)
712 		return -EBUSY;
713 
714 	if (type == SECONDARY_INTERFACE && epf->sec_epc)
715 		return -EBUSY;
716 
717 	mutex_lock(&epc->list_lock);
718 	func_no = find_first_zero_bit(&epc->function_num_map,
719 				      BITS_PER_LONG);
720 	if (func_no >= BITS_PER_LONG) {
721 		ret = -EINVAL;
722 		goto ret;
723 	}
724 
725 	if (func_no > epc->max_functions - 1) {
726 		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
727 		ret = -EINVAL;
728 		goto ret;
729 	}
730 
731 	set_bit(func_no, &epc->function_num_map);
732 	if (type == PRIMARY_INTERFACE) {
733 		epf->func_no = func_no;
734 		epf->epc = epc;
735 		list = &epf->list;
736 	} else {
737 		epf->sec_epc_func_no = func_no;
738 		epf->sec_epc = epc;
739 		list = &epf->sec_epc_list;
740 	}
741 
742 	list_add_tail(list, &epc->pci_epf);
743 ret:
744 	mutex_unlock(&epc->list_lock);
745 
746 	return ret;
747 }
748 EXPORT_SYMBOL_GPL(pci_epc_add_epf);
749 
/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	/* Undo pci_epc_add_epf(): release the function number and unlink. */
	mutex_lock(&epc->list_lock);
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
783 
/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/* Walk all bound EPFs; each callback runs under its own epf->lock. */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
809 
/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/* Walk all bound EPFs; each callback runs under its own epf->lock. */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
835 
/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 *                         is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to Notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	/* Record completion so late-binding EPFs can be notified afterwards
	 * (see pci_epc_notify_pending_init()). */
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
862 
863 /**
864  * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
865  *                                 complete to the EPF device
866  * @epc: the EPC device whose initialization is pending to be notified
867  * @epf: the EPF device to be notified
868  *
869  * Invoke to notify the pending EPC device initialization complete to the EPF
870  * device. This is used to deliver the notification if the EPC initialization
871  * got completed before the EPF driver bind.
872  */
873 void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
874 {
875 	if (epc->init_complete) {
876 		mutex_lock(&epf->lock);
877 		if (epf->event_ops && epf->event_ops->epc_init)
878 			epf->event_ops->epc_init(epf);
879 		mutex_unlock(&epf->lock);
880 	}
881 }
882 EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
883 
/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	/* Mirror of pci_epc_init_notify(): clear the completion marker. */
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
908 
/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 *					device has received the Bus Master
 *					Enable event from the Root complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify the EPF device that the EPC device has generated the Bus Master Enable
 * event due to host setting the Bus Master Enable bit in the Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/* Walk all bound EPFs; each callback runs under its own epf->lock. */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
935 
/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	/* Reverse of __pci_epc_create(): configfs group, domain nr, device. */
	pci_ep_cfs_remove_epc_group(epc->group);
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
951 
/* Device release callback: frees the EPC when its last reference is dropped. */
static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}
956 
/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 *
 * Returns the new struct pci_epc * on success or an ERR_PTR() on failure.
 * NOTE(review): @owner is not referenced in this function's body —
 * presumably it is carried via @ops by the wrapper macro; verify at the
 * call sites.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	/* After device_initialize(), cleanup must go through put_device(). */
	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	/* Expose the controller in configfs; torn down in pci_epc_destroy(). */
	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/* Drops the initial reference; pci_epc_release() frees the epc. */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
1022 
1023 /**
1024  * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
1025  * @dev: device that is creating the new EPC
1026  * @ops: function pointers for performing EPC operations
1027  * @owner: the owner of the module that creates the EPC device
1028  *
1029  * Invoke to create a new EPC device and add it to pci_epc class.
1030  * While at that, it also associates the device with the pci_epc using devres.
1031  * On driver detach, release function is invoked on the devres data,
1032  * then, devres data is freed.
1033  */
1034 struct pci_epc *
1035 __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
1036 		      struct module *owner)
1037 {
1038 	struct pci_epc **ptr, *epc;
1039 
1040 	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
1041 	if (!ptr)
1042 		return ERR_PTR(-ENOMEM);
1043 
1044 	epc = __pci_epc_create(dev, ops, owner);
1045 	if (!IS_ERR(epc)) {
1046 		*ptr = epc;
1047 		devres_add(dev, ptr);
1048 	} else {
1049 		devres_free(ptr);
1050 	}
1051 
1052 	return epc;
1053 }
1054 EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
1055 
/* Register the "pci_epc" device class at module load time. */
static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
1061 
/* Unregister the "pci_epc" device class on module unload. */
static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1070