/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

/*
 * This header file contains MSI data structures and functions which are
 * only relevant for:
 *	- Interrupt core code
 *	- PCI/MSI core code
 *	- MSI interrupt domain implementations
 *	- IOMMU, low level VFIO, NTB and other justified exceptions
 *	  dealing with low level MSI details.
 *
 * Regular device drivers have no business with any of these functions and
 * especially storing MSI descriptor pointers in random code is considered
 * abuse.
 *
 * Device driver relevant functions are available in <linux/msi_api.h>
 */

#include <linux/irqdomain_defs.h>
#include <linux/cpumask_types.h>
#include <linux/msi_api.h>
#include <linux/irq.h>

#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

#ifndef arch_is_isolated_msi
#define arch_is_isolated_msi()	false
#endif
/**
 * struct msi_msg - Representation of a MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};
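
/*
 * For illustration only: a 64-bit message address and a data value map into
 * the fields like this ("doorbell" and "hwdata" are made-up names, not part
 * of this API):
 *
 *	struct msi_msg msg = {
 *		.address_hi	= upper_32_bits(doorbell),
 *		.address_lo	= lower_32_bits(doorbell),
 *		.data		= hwdata,
 *	};
 */
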
75
76 /* Helper functions */
77 struct msi_desc;
78 struct pci_dev;
79 struct device_attribute;
80 struct irq_domain;
81 struct irq_affinity_desc;
82
83 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
84 #ifdef CONFIG_GENERIC_MSI_IRQ
85 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
86 #else
get_cached_msi_msg(unsigned int irq,struct msi_msg * msg)87 static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
88 #endif
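
/*
 * Usage sketch: code which needs the currently programmed message of a Linux
 * interrupt number can retrieve the cached copy instead of touching hardware
 * ("virq" is a made-up variable name):
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(virq, &msg);
 *	(msg now holds the last message written for virq)
 */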

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @can_mask:	[PCI MSI/X] Masking supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI-X] True if the vector was allocated beyond the
 *		hardware MSI-X table size (virtual vector)
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 */
struct pci_msi_desc {
	union {
		u32 msi_mask;
		u32 msix_ctrl;
	};
	struct {
		u8	is_msix		: 1;
		u8	multiple	: 3;
		u8	multi_cap	: 3;
		u8	can_mask	: 1;
		u8	is_64		: 1;
		u8	is_virtual	: 1;
		unsigned default_irq;
	} msi_attrib;
	union {
		u8	mask_pos;
		void __iomem *mask_base;
	};
};

/**
 * union msi_domain_cookie - Opaque MSI domain specific data
 * @value:	u64 value store
 * @ptr:	Pointer to domain specific data
 * @iobase:	Domain specific IOmem pointer
 *
 * The content of this data is implementation defined and used by the MSI
 * domain to store domain specific information which is required for
 * interrupt chip callbacks.
 */
union msi_domain_cookie {
	u64	value;
	void	*ptr;
	void	__iomem *iobase;
};

/**
 * struct msi_desc_data - Generic MSI descriptor data
 * @dcookie:	Cookie for MSI domain specific data which is required
 *		for irq_chip callbacks
 * @icookie:	Cookie for the MSI interrupt instance provided by
 *		the usage site to the allocation function
 *
 * The content of this data is implementation defined, e.g. PCI/IMS
 * implementations define the meaning of the data. The MSI core ignores
 * this data completely.
 */
struct msi_desc_data {
	union msi_domain_cookie		dcookie;
	union msi_instance_cookie	icookie;
};

#define MSI_MAX_INDEX		((unsigned int)USHRT_MAX)

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
 *		    Only used if iommu_msi_shift != 0
 * @iommu_msi_shift: Indicates how many bits of the original address should be
 *		     preserved when using iommu_msi_iova.
 * @sysfs_attrs: Pointer to sysfs device attributes
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_index:	Index of the msi descriptor
 * @pci:	PCI specific msi descriptor data
 * @data:	Generic MSI descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	u64				iommu_msi_iova : 58;
	u64				iommu_msi_shift : 6;
#endif
#ifdef CONFIG_SYSFS
	struct device_attribute		*sysfs_attrs;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	u16				msi_index;
	union {
		struct pci_msi_desc	pci;
		struct msi_desc_data	data;
	};
};

/*
 * Filter values for the MSI descriptor iterators and accessor functions.
 */
enum msi_desc_filter {
	/* All descriptors */
	MSI_DESC_ALL,
	/* Descriptors which have no interrupt associated */
	MSI_DESC_NOTASSOCIATED,
	/* Descriptors which have an interrupt associated */
	MSI_DESC_ASSOCIATED,
};

/**
 * struct msi_dev_domain - The internals of MSI domain info per device
 * @store:	Xarray for storing MSI descriptor pointers
 * @domain:	Pointer to a per device interrupt domain
 */
struct msi_dev_domain {
	struct xarray		store;
	struct irq_domain	*domain;
};

int msi_setup_device_data(struct device *dev);

void msi_lock_descs(struct device *dev);
void msi_unlock_descs(struct device *dev);

struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter);

/**
 * msi_first_desc - Get the first MSI descriptor of the default irqdomain
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
static inline struct msi_desc *msi_first_desc(struct device *dev,
					      enum msi_desc_filter filter)
{
	return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}

struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter);

/**
 * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_domain_for_each_desc(desc, dev, domid, filter)			\
	for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc);	\
	     (desc) = msi_next_desc((dev), (domid), (filter)))

/**
 * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_for_each_desc(desc, dev, filter)					\
	msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
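
/*
 * Usage sketch for the iterators (illustrative only; everything except the
 * MSI API names is made up):
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_debug("index %u -> irq %u\n", desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 */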

#define msi_desc_to_dev(desc)		((desc)->dev)

static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
					       unsigned int msi_shift)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
	desc->iommu_msi_iova = msi_iova >> msi_shift;
	desc->iommu_msi_shift = msi_shift;
#endif
}

/**
 * msi_msg_set_addr() - Set MSI address in an MSI message
 *
 * @desc:	MSI descriptor that may carry an IOVA base address for MSI via
 *		@iommu_msi_iova/shift
 * @msg:	Target MSI message to set its address_hi and address_lo
 * @msi_addr:	Physical address to set the MSI message
 *
 * Notes:
 *  - Override @msi_addr using the IOVA base address in the @desc if
 *    @iommu_msi_shift is set
 *  - Otherwise, simply set @msi_addr to @msg
 */
static inline void msi_msg_set_addr(struct msi_desc *desc, struct msi_msg *msg,
				    phys_addr_t msi_addr)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
	if (desc->iommu_msi_shift) {
		u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;

		msg->address_hi = upper_32_bits(msi_iova);
		msg->address_lo = lower_32_bits(msi_iova) |
				  (msi_addr & ((1 << desc->iommu_msi_shift) - 1));
		return;
	}
#endif
	msg->address_hi = upper_32_bits(msi_addr);
	msg->address_lo = lower_32_bits(msi_addr);
}
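
/*
 * Worked example with illustrative numbers: assume iommu_msi_shift == 12 and
 * an IOVA base of 0x8000000000 (so iommu_msi_iova holds 0x8000000). For a
 * physical @msi_addr of 0xfee00438 the resulting message is:
 *
 *	address_hi = 0x00000080
 *	address_lo = 0x00000438		(IOVA low bits | offset inside the 4K page)
 */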

int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc);
/**
 * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
 *			 default irqdomain and insert it at @init_desc->msi_index
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
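
/*
 * Usage sketch (hypothetical caller): fill a template descriptor on the stack
 * and let the core allocate and store the real one at @msi_index. Bus specific
 * fields such as @pci or @data are filled in as required by the actual user:
 *
 *	struct msi_desc desc = {
 *		.msi_index	= 0,
 *		.nvec_used	= 1,
 *	};
 *
 *	ret = msi_insert_msi_desc(dev, &desc);
 */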

void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last);

/**
 * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
 *			      in the default irqdomain
 *
 * @dev:	Device for which to free the descriptors
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
					    unsigned int last)
{
	msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}

/**
 * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
 * @dev:	Device to free the descriptors
 */
static inline void msi_free_msi_descs(struct device *dev)
{
	msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}

/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
 * entries of MSI IRQs.
 */
#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
#ifdef CONFIG_SYSFS
int msi_device_populate_sysfs(struct device *dev);
void msi_device_destroy_sysfs(struct device *dev);
#else /* CONFIG_SYSFS */
static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
static inline void msi_device_destroy_sysfs(struct device *dev) { }
#endif /* !CONFIG_SYSFS */
#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * The restore hook is still available even for fully irq domain based
 * setups. Courtesy of XEN/X86.
 */
bool arch_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct irq_fwspec;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @prepare_desc:	Optional function to prepare the allocated MSI descriptor
 *			in the domain
 * @set_desc:		Set the msi descriptor for an interrupt
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 * @msi_post_free:	Optional function which is invoked after freeing
 *			all interrupts.
 * @msi_translate:	Optional translate callback to support the odd wire to
 *			MSI bridges, e.g. MBIGEN
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
 * irqdomain.
 *
 * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
 * msi_domain_alloc/free_irqs*() variants.
 *
 * @domain_alloc_irqs and @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * was initially required for a wrapper around XEN's separate MSI universe,
 * which can't be wrapped into the regular irq domain concepts by mere
 * mortals. It allows msi_domain_alloc/free_irqs() to be used universally
 * without having to special case XEN all over the place.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
					struct msi_desc *desc);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
	void		(*msi_post_free)(struct irq_domain *domain,
					 struct device *dev);
	int		(*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
					 irq_hw_number_t *hwirq, unsigned int *type);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @bus_token:		The domain bus token
 * @hwsize:		The hardware table size or the software index limit.
 *			If 0 then the size is considered unlimited and
 *			gets initialized to the maximum software index limit
 *			by the domain creation code.
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32				flags;
	enum irq_domain_bus_token	bus_token;
	unsigned int			hwsize;
	struct msi_domain_ops		*ops;
	struct irq_chip			*chip;
	void				*chip_data;
	irq_flow_handler_t		handler;
	void				*handler_data;
	const char			*handler_name;
	void				*data;
};

/**
 * struct msi_domain_template - Template for MSI device domains
 * @name:	Storage for the resulting name. Filled in by the core.
 * @chip:	Interrupt chip for this domain
 * @ops:	MSI domain ops
 * @info:	MSI domain info data
 */
struct msi_domain_template {
	char			name[48];
	struct irq_chip		chip;
	struct msi_domain_ops	ops;
	struct msi_domain_info	info;
};

/*
 * Flags for msi_domain_info
 *
 * Bit 0-15:	Generic MSI functionality which is not subject to restriction
 *		by parent domains
 *
 * Bit 16-31:	Functionality which depends on the underlying parent domain and
 *		can be masked out by msi_parent_ops::init_dev_msi_info() when
 *		a device MSI domain is initialized.
 */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 2),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 3),
	/* Populate sysfs on alloc() and destroy it on free() */
	MSI_FLAG_DEV_SYSFS		= (1 << 4),
	/* Allocate simple MSI descriptors */
	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
	/* Free MSI descriptors */
	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
	/* Use dev->fwnode for MSI device domain creation */
	MSI_FLAG_USE_DEV_FWNODE		= (1 << 7),
	/* Set parent->dev into domain->pm_dev on device domain creation */
	MSI_FLAG_PARENT_PM_DEV		= (1 << 8),
	/* Support for parent mask/unmask */
	MSI_FLAG_PCI_MSI_MASK_PARENT	= (1 << 9),

	/* Mask for the generic functionality */
	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),

	/* Mask for the domain specific functionality */
	MSI_DOMAIN_FLAGS_MASK		= GENMASK(31, 16),

	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 16),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 17),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 18),
	/* MSI-X entries must be contiguous */
	MSI_FLAG_MSIX_CONTIGUOUS	= (1 << 19),
	/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
	/* PCI MSIs cannot be steered separately to CPU cores */
	MSI_FLAG_NO_AFFINITY		= (1 << 21),
	/* Inhibit usage of entry masking */
	MSI_FLAG_NO_MASK		= (1 << 22),
};

/*
 * Flags for msi_parent_ops::chip_flags
 */
enum {
	MSI_CHIP_FLAG_SET_EOI		= (1 << 0),
	MSI_CHIP_FLAG_SET_ACK		= (1 << 1),
};

/**
 * struct msi_parent_ops - MSI parent domain callbacks and configuration info
 *
 * @supported_flags:	Required: The supported MSI flags of the parent domain
 * @required_flags:	Optional: The required MSI flags of the parent MSI domain
 * @chip_flags:		Optional: Select MSI chip callbacks to update with defaults
 *			in msi_lib_init_dev_msi_info().
 * @bus_select_token:	Optional: The bus token of the real parent domain for
 *			irq_domain::select()
 * @bus_select_mask:	Optional: A mask of supported BUS_DOMAINs for
 *			irq_domain::select()
 * @prefix:		Optional: Prefix for the domain and chip name
 * @init_dev_msi_info:	Required: Callback for MSI parent domains to setup parent
 *			domain specific domain flags, domain ops and interrupt chip
 *			callbacks when a per device domain is created.
 */
struct msi_parent_ops {
	u32		supported_flags;
	u32		required_flags;
	u32		chip_flags;
	u32		bus_select_token;
	u32		bus_select_mask;
	const char	*prefix;
	bool		(*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
					     struct irq_domain *msi_parent_domain,
					     struct msi_domain_info *msi_child_info);
};

bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info);

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
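
/*
 * Sketch of a global MSI irqdomain setup (hypothetical chip and fwnode names;
 * real examples live in the irqchip and architecture code):
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_msi_irq_chip,
 *	};
 *
 *	d = msi_create_irq_domain(fwnode, &my_msi_domain_info, parent_domain);
 */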

bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data);
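
/*
 * Sketch of a per device MSI domain built from a template (hypothetical names
 * and hwsize; compare the existing users of this interface for real code):
 *
 *	static const struct msi_domain_template my_template = {
 *		.chip = {
 *			.name			= "my-msi",
 *			.irq_mask		= my_msi_mask,
 *			.irq_unmask		= my_msi_unmask,
 *			.irq_write_msi_msg	= my_msi_write_msg,
 *		},
 *		.info = {
 *			.flags		= MSI_FLAG_USE_DEF_DOM_OPS |
 *					  MSI_FLAG_USE_DEF_CHIP_OPS,
 *			.bus_token	= DOMAIN_BUS_DEVICE_MSI,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &my_template,
 *					  256, NULL, NULL))
 *		return -ENODEV;
 */
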
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);

bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token);

int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);

struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *cookie);
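
/*
 * Usage sketch: allocate a single interrupt at any free index with an
 * implementation defined per-instance cookie (the cookie value is made up):
 *
 *	union msi_instance_cookie icookie = { .value = 0xbeef };
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
 *				      NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *	(map.virq now holds the Linux interrupt number)
 */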

void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);

struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

/* Per device platform MSI */
int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
					    irq_write_msi_msg_t write_msi_msg);
void platform_device_msi_free_irqs_all(struct device *dev);
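
/*
 * Usage sketch for per device platform MSI (hypothetical device structure and
 * register offsets; the callback signature matches irq_write_msi_msg_t):
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct my_device *my = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel(msg->address_lo, my->base + MY_MSI_ADDR_LO);
 *		writel(msg->address_hi, my->base + MY_MSI_ADDR_HI);
 *		writel(msg->data, my->base + MY_MSI_DATA);
 *	}
 *
 *	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, nvec,
 *						      my_write_msi_msg);
 */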

bool msi_device_has_isolated_msi(struct device *dev);

static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs)
{
	return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1);
}

#else /* CONFIG_GENERIC_MSI_IRQ */
static inline bool msi_device_has_isolated_msi(struct device *dev)
{
	/*
	 * Arguably if the platform does not enable MSI support then it has
	 * "isolated MSI", as an interrupt controller that cannot receive MSIs
	 * is inherently isolated by our definition. The default definition for
	 * arch_is_isolated_msi() is conservative and returns false anyhow.
	 */
	return arch_is_isolated_msi();
}
#endif /* CONFIG_GENERIC_MSI_IRQ */

/* PCI specific interfaces */
#ifdef CONFIG_PCI_MSI
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif /* !CONFIG_PCI_MSI */

#endif /* LINUX_MSI_H */