/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/stdarg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#define	WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h. */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of single-segment busdma mapping failures");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* Bus interface. */
	DEVMETHOD(bus_add_child, bus_generic_add_child),

	/* Backlight interface. */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool		enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t		region_mask;
	struct resource	*region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
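/*
 * Shadow tables for pcim_iomap*(): mmio_table holds the Linux-style
 * mapped addresses handed out to drivers, res_table the backing FreeBSD
 * resources (returned instead when pdev->want_iomap_res is set).
 */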
struct pcim_iomap_devres {
	void		*mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource	*res_table[PCIR_MAX_BAR_0 + 1];
};

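/*
 * Per-device DMA state hanging off dev->dma_priv: one busdma tag for
 * streaming mappings and one for coherent allocations, plus a pctrie
 * (keyed by dma_addr) of active non-1:1 mappings so unmap/sync can
 * recover the bus_dmamap later.
 */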
struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static void
lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar,
    void *res)
{
	dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
	dr->res_table[bar] = res;
}

static bool
lkpi_pci_bar_id_valid(int bar)
{
	if (bar < 0 || bar > PCIR_MAX_BAR_0)
		return (false);

	return (true);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

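/*
 * (Re)create the streaming DMA tag for a device. nsegments is 1, so
 * every mapping through this tag must fit in a single physically
 * contiguous segment; failures to do so are what the
 * lkpi_pci_nseg1_fail counter above tracks. Like its Linux
 * counterpart this returns a negative errno on failure.
 */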
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev, *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		/* Walk until we find odev. */
		if (odev != NULL) {
			if (pdev == odev)
				odev = NULL;
			continue;
		}

		if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) &&
		    (pdev->device == device || device == PCI_ANY_ID)) {
			found = pdev;
			break;
		}
	}
	pci_dev_get(found);
	spin_unlock(&pci_lock);

	return (found);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static int
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{
	struct pci_devinfo *dinfo;
	int error;

	error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
	    &linux_root_device.kobj, device_get_nameunit(dev));
	if (error != 0) {
		printf("%s:%d: kobject_init_and_add returned %d\n",
		    __func__, __LINE__, error);
		return (error);
	}

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));

	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);

	/* Check if we have reached the root to satisfy pci_is_root_bus(). */
	dinfo = device_get_ivars(dev);
	if (dinfo->cfg.pcie.pcie_location != 0 &&
	    dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
		pdev->bus->self = NULL;
	} else {
		/*
		 * This should be the upstream bridge; pci_upstream_bridge()
		 * handles that case on demand as otherwise we'll shadow the
		 * entire PCI hierarchy.
		 */
		pdev->bus->self = pdev;
	}
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
	INIT_LIST_HEAD(&pdev->dev.irqents);

	return (0);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev && pdev->bus->self != NULL)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;
	int error;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0) {
		free(pdev, M_DEVBUF);
		return (NULL);
	}
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

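/*
 * Linux encodes a PCI class as a 24-bit value: base class in bits
 * 23-16, subclass in bits 15-8, programming interface in bits 7-0.
 * The lookup below splits that into the base/subclass pair that the
 * native pci_find_class_from() expects.
 */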
struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_base_class_from(baseclass, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_slot(struct pci_bus *pbus, unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_bsf(pbus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	/* Reserve resources for this BAR if needed. */
	if (rle == NULL && reserve_bar)
		rle = linux_pci_reserve_bar(pdev, rl, type, rid);
	return (rle);
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	struct pci_dev *pbus, *ppbus;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0)
		return (error);

	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	/*
	 * Create the hierarchy now as we cannot do it on demand later.
	 * Take special care of DRM as there is a non-PCI device in the chain.
	 */
	pbus = pdev;
	if (isdrm) {
		pbus = lkpinew_pci_dev(parent);
		if (pbus == NULL) {
			error = ENXIO;
			goto out_dma_init;
		}
	}
	pcie_find_root_port(pbus);
	if (isdrm)
		pdev->root = pbus->root;
	ppbus = pci_upstream_bridge(pbus);
	while (ppbus != NULL && ppbus != pbus) {
		pbus = ppbus;
		ppbus = pci_upstream_bridge(pbus);
	}

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

	/* XXX the cleanup does not match the allocation up there. */
out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* From now on this device is managed. */
	pdev->managed = true;

	return (error);
}

static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

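/*
 * A minimal usage sketch, assuming a hypothetical driver "foo" that
 * maps BARs 0 and 2 the Linux way:
 *
 *	error = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo");
 *	if (error != 0)
 *		return (error);
 *	base = pcim_iomap_table(pdev)[0];
 *
 * Both mappings are then released automatically by devres on detach.
 */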
void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	/*
	 * If the driver has manually set a flag to be able to request the
	 * resource to use bus_read/write_<n>, return the shadow table.
	 */
	if (pdev->want_iomap_res)
		return ((void **)dr->res_table);

	/* This is the Linux default. */
	return (dr->mmio_table);
}

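/*
 * Resolve a BAR to an active FreeBSD resource, allocating it and
 * caching it in pdev->mmio on first use so that repeated iomap and
 * request calls hand back the same mapping.
 */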
static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

void *
linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar,
    unsigned long off, unsigned long maxlen)
{
	struct resource *res;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	res = _lkpi_pci_iomap(pdev, bar, maxlen);
	if (res == NULL)
		return (NULL);
	/* This is a FreeBSD extension so we can use bus_*(). */
	if (pdev->want_iomap_res)
		return (res);
	MPASS(off < rman_get_size(res));
	return ((void *)(rman_get_bushandle(res) + off));
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen));
}

void *
linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	struct pcim_iomap_devres *dr;
	void *res;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	if (dr->res_table[bar] != NULL)
		return (dr->res_table[bar]);

	res = linuxkpi_pci_iomap(pdev, bar, maxlen);
	if (res == NULL) {
		/*
		 * Do not free the devres in case there were
		 * other valid mappings before already.
		 */
		return (NULL);
	}
	lkpi_set_pcim_iomap_devres(dr, bar, res);

	return (res);
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;
	bus_space_handle_t bh = (bus_space_handle_t)res;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (pdev->want_iomap_res) {
			if (res != mmio->res)
				continue;
		} else {
			if (bh < rman_get_bushandle(mmio->res) ||
			    bh >= rman_get_bushandle(mmio->res) +
			    rman_get_size(mmio->res))
				continue;
		}
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting a BAR twice is not allowed. */
		if (dr->mmio_table[bar] != NULL) {
			device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
			    __func__, bar, dr->mmio_table[bar]);
			goto err;
		}

		res = _lkpi_pci_iomap(pdev, bar, 0);
		if (res == NULL)
			goto err;
		lkpi_set_pcim_iomap_devres(dr, bar, res);

		mappings |= (1 << bar);
	}

	return (0);
err:
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((mappings & (1 << bar)) != 0) {
			res = dr->mmio_table[bar];
			if (res == NULL)
				continue;
			pci_iounmap(pdev, res);
		}
	}

	return (-EINVAL);
}

static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
		if (error == 0 && pmops->suspend_noirq != NULL)
			error = -pmops->suspend_noirq(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
	dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

static int
lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
    bool managed)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	if (!lkpi_pci_bar_id_valid(bar))
		return (-EINVAL);

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (0);

	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-EBUSY);
	}

	/*
	 * If the device is managed (lkpi_pci_devres_find() case) these
	 * requests are implicitly tracked by devres; otherwise the
	 * resources are not automatically freed on FreeBSD/LinuxKPI even
	 * though Linux drivers expect them to be.
	 * If we are called from a pcim-function with the managed argument
	 * set, we track devres independently of pdev->managed.
	 */
	if (managed)
		dr = lkpi_pci_devres_get_alloc(pdev);
	else
		dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

int
linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return (lkpi_pci_request_region(pdev, bar, res_name, false));
}

int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -EBUSY) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

int
linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name)
{
	int bar, error;

	for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) {
		error = lkpi_pci_request_region(pdev, bar, res_name, true);
		if (error != 0 && error != -EBUSY) {
			device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': "
			    "lkpi_pci_request_region returned %d\n", __func__,
			    bar, res_name, error);
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release.  Do clear before resource release.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
		    " region_table res %p != rle->res %p\n", __func__, pdev,
		    bar, dr->region_table[bar], rle->res));
		dr->region_table[bar] = NULL;
		dr->region_mask &= ~(1 << bar);
	}

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (rle->res != (void *)rman_get_bushandle(mmio->res))
			continue;
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
	}

	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
    int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle the case where pci_alloc_msix() may allocate fewer
	 * interrupts than requested and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	pdev->msix_enabled = true;
	return (0);
}

int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
	struct resource_list_entry *rle;
	int error;
	int nvec;

	if (maxvec < minvec)
		return (-EINVAL);

	nvec = pci_msi_count(pdev->dev.bsddev);
	if (nvec < 1 || nvec < minvec)
		return (-ENOSPC);

	nvec = min(nvec, maxvec);
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
		return error;

	/* Native PCI might only ever ask for 32 vectors. */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

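/*
 * A minimal sketch of the usual Linux calling pattern; the flags
 * express a fallback order from MSI-X over MSI to legacy INTx:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvec < 0)
 *		return (nvec);
 *
 * On success the return value is the number of vectors granted.
 */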
int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

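/*
 * Lazily allocate a Linux-style struct msi_desc for an interrupt:
 * locate the pci_dev owning the vector, then synthesize the descriptor
 * from the native MSI configuration. The result is cached in
 * pdev->msi_desc for subsequent lookups.
 */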
struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

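/*
 * On i386/amd64/aarch64 streaming mappings go through busdma and are
 * tracked in the per-device pctrie keyed by the resulting bus address;
 * identity (1:1) mappings bypass that tracking entirely.  On all other
 * architectures the physical address is used as the DMA address as-is.
 */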
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg);
	if (error != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug) {
			device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys "
			    "error %d, phys %#018jx len %zu\n", __func__,
			    error, (uintmax_t)phys, len);
			dump_stack();
		}
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	dma_addr_t dma;

	priv = dev->dma_priv;
	dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat);
	if (dma_mapping_error(dev, dma))
		return (dma);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		dma_sync_single_for_device(dev, dma, len, direction);

	return (dma);
}

/* For backward compat only so we can MFC this.  Remove before 15. */
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	/* dma_sync_single_for_cpu() unrolled to avoid lock recursion. */
	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

skip_sync:
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
}
#endif

/* For backward compat only so we can MFC this.  Remove before 15. */
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0);
}

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t		size;
	dma_addr_t	*handle;
	void		*mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

static int
lkpi_dmam_coherent_match(struct device *dev, void *dr, void *mp)
{
	struct lkpi_devres_dmam_coherent *a, *b;

	a = dr;
	b = mp;

	if (a->mem != b->mem)
		return (0);
	if (a->size != b->size || a->handle != b->handle)
		dev_WARN(dev, "for mem %p: size %zu != %zu || handle %#jx != %#jx\n",
		    a->mem, a->size, b->size,
		    (uintmax_t)a->handle, (uintmax_t)b->handle);
	return (1);
}

void
linuxkpi_dmam_free_coherent(struct device *dev, size_t size,
    void *addr, dma_addr_t dma_handle)
{
	struct lkpi_devres_dmam_coherent match = {
		.size = size,
		.handle = &dma_handle,
		.mem = addr
	};
	int error;

	error = devres_destroy(dev, lkpi_dmam_free_coherent,
	    lkpi_dmam_coherent_match, &match);
	if (error != 0)
		dev_WARN(dev, "devres_destroy returned %d, size %zu addr %p "
		    "dma_handle %#jx\n", error, size, addr, (uintmax_t)dma_handle);
	dma_free_coherent(dev, size, addr, dma_handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

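/*
 * Scatter/gather mapping.  One busdma map, stored in the first S/G
 * entry, backs the whole list; every entry is loaded separately and so
 * must itself fit in a single contiguous segment.  Returns the number
 * of entries mapped, or 0 on failure (the Linux convention).
 */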
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	/*
	 * Map Linux transfer directions onto busdma semantics: a
	 * DMA_TO_DEVICE transfer is a busdma "write" (memory to device),
	 * DMA_FROM_DEVICE is a busdma "read" (device to memory).
	 */
	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	default:
		break;
	}
skip_sync:

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		/* Complete both directions of the transfer. */
		bus_dmamap_sync(priv->dmat, sgl->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}
skip_sync:

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

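/*
 * Linux dma_pool emulation.  Fixed-size DMA-able buffers are served
 * from a UMA cache zone: the import/release callbacks allocate and
 * free backing memory through busdma, while the ctor/dtor load and
 * unload each object's DMA map.  A pctrie keyed by DMA address lets
 * dma_pool_free() recover the object from its handle.
 */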
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	/* No side effects in the assertion; it vanishes without INVARIANTS. */
	KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

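/*
 * Illustrative pool lifecycle (hypothetical driver fragment; pdev and
 * the pool name are assumptions): fixed-size descriptors are carved
 * out of the pool and returned to it, and the pool itself is torn
 * down on detach.
 */
#if 0
	struct dma_pool *pool;
	void *desc;
	dma_addr_t desc_dma;

	pool = dma_pool_create("mydesc", &pdev->dev, 64, 64, 0);
	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
	/* ... hand desc_dma to the device, fill in desc ... */
	dma_pool_free(pool, desc, desc_dma);
	dma_pool_destroy(pool);
#endif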
void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	/* Scale the Linux brightness range to a 0..100 percentage. */
	props->brightness = pdev->dev.bd->props.brightness * 100 /
	    pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4 /* FB_BLANK_POWERDOWN */ : 0 /* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

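/*
 * Illustrative backlight registration by a Linux-style driver
 * (hypothetical names and ops).  The registered device is bridged to
 * the native backlight(9) framework through the linux_backlight_*
 * methods above.
 */
#if 0
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
		.brightness = 255,
	};

	bd = backlight_device_register("mybl", &pdev->dev, sc,
	    &my_backlight_ops, &props);
#endif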
struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops,
    struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}
