// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 * Copyright (C) 2016 Christoph Hellwig.
 */
#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#include "../pci.h"
#include "msi.h"

bool pci_msi_enable = true;

/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable)
		return 0;

	if (!dev || dev->no_msi)
		return 0;

	/*
	 * You can't ask for fewer than one MSI:
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return 0;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 *
	 * The NO_MSI flag can either be set directly by:
	 * - arch-specific PCI host bus controller drivers (deprecated)
	 * - quirks for specific PCI bridges
	 *
	 * or indirectly by platform-specific PCI host bridge drivers by
	 * advertising the 'msi_domain' property, which results in
	 * the NO_MSI flag when no MSI domain is found for this bridge
	 * at probe time.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return 0;

	return 1;
}

static void pcim_msi_release(void *pcidev)
{
	struct pci_dev *dev = pcidev;

	dev->is_msi_managed = false;
	pci_free_irq_vectors(dev);
}

/*
 * Needs to be separate from pcim_release to prevent an ordering problem
 * vs. msi_device_data_release() in the MSI core code.
 *
 * TODO: Remove the legacy side-effect of pcim_enable_device() that
 * activates automatic IRQ vector management. It is dangerous and
 * confusing because it silently switches otherwise unmanaged functions
 * into managed mode. Drivers should manage their IRQ vectors explicitly
 * instead of relying on this implicit behavior.
 *
 * The current implementation needs both the pdev->is_managed and
 * pdev->is_msi_managed flags, which is avoidable complexity that
 * should be simplified eventually.
 */
static int pcim_setup_msi_release(struct pci_dev *dev)
{
	int ret;

	if (!pci_is_managed(dev) || dev->is_msi_managed)
		return 0;

	ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
	if (ret)
		return ret;

	dev->is_msi_managed = true;
	return 0;
}

/*
 * Ordering vs. devres: msi device data has to be installed first so that
 * pcim_msi_release() is invoked before it on device release.
 */
static int pci_setup_msi_context(struct pci_dev *dev)
{
	int ret = msi_setup_device_data(&dev->dev);

	if (ret)
		return ret;

	return pcim_setup_msi_release(dev);
}

/*
 * Helper functions for mask/unmask and MSI message handling
 */

void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(desc);
	raw_spinlock_t *lock = &dev->msi_lock;
	unsigned long flags;

	if (!desc->pci.msi_attrib.can_mask)
		return;

	raw_spin_lock_irqsave(lock, flags);
	desc->pci.msi_mask &= ~clear;
	desc->pci.msi_mask |= set;
	pci_write_config_dword(dev, desc->pci.mask_pos, desc->pci.msi_mask);
	raw_spin_unlock_irqrestore(lock, flags);
}
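
/*
 * For illustration only (this file goes through the __pci_msi_mask_desc()
 * and __pci_msi_unmask_desc() wrappers): masking the second vector of a
 * multi-MSI block via the shadowed mask register, and unmasking it again,
 * would look like:
 *
 *	pci_msi_update_mask(desc, 0, BIT(1));	// set mask bit 1
 *	pci_msi_update_mask(desc, BIT(1), 0);	// clear mask bit 1
 */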

/**
 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
 * @data:	pointer to the irqdata associated with that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	__pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);

/**
 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
 * @data:	pointer to the irqdata associated with that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	__pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
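
/*
 * Note on the BIT() math above, for illustration: with multi-MSI the
 * vector block maps to consecutive Linux IRQ numbers starting at
 * desc->irq, so (data->irq - desc->irq) is the vector's index within
 * the block. E.g. with desc->irq == 42, an interrupt with
 * data->irq == 45 selects mask bit 3 of the shadowed mask register.
 */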

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	BUG_ON(dev->current_state != PCI_D0);

	if (entry->pci.msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
			return;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->pci.msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}

static inline void pci_write_msg_msi(struct pci_dev *dev, struct msi_desc *desc,
				     struct msi_msg *msg)
{
	int pos = dev->msi_cap;
	u16 msgctl;

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
	msgctl &= ~PCI_MSI_FLAGS_QSIZE;
	msgctl |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, desc->pci.msi_attrib.multiple);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
	if (desc->pci.msi_attrib.is_64) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi);
		pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data);
	} else {
		pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data);
	}
	/* Ensure that the writes are visible in the device */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}

static inline void pci_write_msg_msix(struct msi_desc *desc, struct msi_msg *msg)
{
	void __iomem *base = pci_msix_desc_addr(desc);
	u32 ctrl = desc->pci.msix_ctrl;
	bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

	if (desc->pci.msi_attrib.is_virtual)
		return;
	/*
	 * The specification mandates that the entry is masked
	 * when the message is modified:
	 *
	 * "If software changes the Address or Data value of an
	 * entry while the entry is unmasked, the result is
	 * undefined."
	 */
	if (unmasked)
		pci_msix_write_vector_ctrl(desc, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);

	writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
	writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
	writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

	if (unmasked)
		pci_msix_write_vector_ctrl(desc, ctrl);

	/* Ensure that the writes are visible in the device */
	readl(base + PCI_MSIX_ENTRY_DATA);
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
		/* Don't touch the hardware now */
	} else if (entry->pci.msi_attrib.is_msix) {
		pci_write_msg_msix(entry, msg);
	} else {
		pci_write_msg_msi(dev, entry, msg);
	}

	entry->msg = *msg;

	if (entry->write_msi_msg)
		entry->write_msi_msg(entry, entry->write_msi_msg_data);
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);

/* PCI/MSI specific functionality */

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void pci_msi_set_enable(struct pci_dev *dev, int enable)
{
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
			      struct irq_affinity_desc *masks)
{
	struct msi_desc desc;
	u16 control;

	/* MSI Entry Initialization */
	memset(&desc, 0, sizeof(desc));

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	/* Lies, damned lies, and MSIs */
	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
		control |= PCI_MSI_FLAGS_MASKBIT;
	if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
		control &= ~PCI_MSI_FLAGS_MASKBIT;

	desc.nvec_used			= nvec;
	desc.pci.msi_attrib.is_64	= !!(control & PCI_MSI_FLAGS_64BIT);
	desc.pci.msi_attrib.can_mask	= !!(control & PCI_MSI_FLAGS_MASKBIT);
	desc.pci.msi_attrib.default_irq	= dev->irq;
	desc.pci.msi_attrib.multi_cap	= FIELD_GET(PCI_MSI_FLAGS_QMASK, control);
	desc.pci.msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
	desc.affinity			= masks;

	if (control & PCI_MSI_FLAGS_64BIT)
		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (desc.pci.msi_attrib.can_mask)
		pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);

	return msi_insert_msi_desc(&dev->dev, &desc);
}
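
/*
 * Encoding note, for illustration: msi_attrib.multiple is the log2 of
 * the number of vectors actually allocated, and is later written into
 * the QSIZE field by pci_write_msg_msi(). E.g. nvec == 6 is rounded up
 * to 8, so multiple == 3 and the device is enabled for 8 vectors.
 * multi_cap is the corresponding log2 value the device advertises in
 * the QMASK field.
 */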

static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;
	u64 address;

	if (dev->msi_addr_mask == DMA_BIT_MASK(64))
		return 0;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
		address = (u64)entry->msg.address_hi << 32 | entry->msg.address_lo;
		if (address & ~dev->msi_addr_mask) {
			pci_err(dev, "arch assigned 64-bit MSI address %#llx above device MSI address mask %#llx\n",
				address, dev->msi_addr_mask);
			break;
		}
	}
	return !entry ? 0 : -EIO;
}
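
/*
 * Worked example: a device limited to 32-bit MSI addresses has
 * msi_addr_mask == DMA_BIT_MASK(32). If the arch assigns the address
 * 0x100000000, then (address & ~mask) is non-zero and the verification
 * above fails with -EIO.
 */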

static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
	int ret = msi_setup_msi_desc(dev, nvec, masks);
	struct msi_desc *entry, desc;

	if (ret)
		return ret;

	/* All MSIs are unmasked by default; mask them all */
	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
	pci_msi_mask(entry, msi_multi_mask(entry));
	/*
	 * Copy the MSI descriptor for the error path because
	 * pci_msi_setup_msi_irqs() will free it for the hierarchical
	 * interrupt domain case.
	 */
	memcpy(&desc, entry, sizeof(desc));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret)
		goto err;

	ret = msi_verify_entries(dev);
	if (ret)
		goto err;

	/* Set MSI enabled bits */
	dev->msi_enabled = 1;
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);

	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	return 0;
err:
	pci_msi_unmask(&desc, msi_multi_mask(&desc));
	pci_free_msi_irqs(dev);
	return ret;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 * @affd: description of automatic IRQ affinity assignments (may be %NULL)
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI IRQ.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec,
			       struct irq_affinity *affd)
{
	/* Reject multi-MSI early on irq domain enabled architectures */
	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
		return 1;

	/*
	 * Disable MSI during setup in the hardware, but mark it enabled
	 * so that setup code can evaluate it.
	 */
	pci_msi_set_enable(dev, 0);

	struct irq_affinity_desc *masks __free(kfree) =
		affd ? irq_create_affinity_masks(nvec, affd) : NULL;

	guard(msi_descs_lock)(&dev->dev);
	return __msi_capability_init(dev, nvec, masks);
}

int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
			   struct irq_affinity *affd)
{
	int nvec;
	int rc;

	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
		return -EINVAL;

	/* Check whether driver already requested MSI-X IRQs */
	if (dev->msix_enabled) {
		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	}

	if (maxvec < minvec)
		return -ERANGE;

	if (WARN_ON_ONCE(dev->msi_enabled))
		return -EINVAL;

	/* Test for the availability of MSI support */
	if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
		return -ENOTSUPP;

	nvec = pci_msi_vec_count(dev);
	if (nvec < 0)
		return nvec;
	if (nvec < minvec)
		return -ENOSPC;

	rc = pci_setup_msi_context(dev);
	if (rc)
		return rc;

	if (!pci_setup_msi_device_domain(dev, nvec))
		return -ENODEV;

	if (nvec > maxvec)
		nvec = maxvec;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = msi_capability_init(dev, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}
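
/*
 * Illustrative driver-side usage (not part of this file): drivers
 * normally reach __pci_enable_msi_range() through the higher-level
 * pci_alloc_irq_vectors() API rather than calling it directly. The
 * handler, name and private pointer below are hypothetical:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
 *	if (nvec < 0)
 *		return nvec;
 *	err = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
 *			  "mydev", priv);
 */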

/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * the Multiple Message Capable register. It returns a negative errno if the
 * device is not capable of sending MSI interrupts. Otherwise, the call
 * succeeds and returns a power of two, up to a maximum of 2^5 (32), per the
 * MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
	int ret;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	ret = 1 << FIELD_GET(PCI_MSI_FLAGS_QMASK, msgctl);

	return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);
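
/*
 * Illustrative usage (hypothetical driver code): bound a vector request
 * by what the device advertises before asking for a range:
 *
 *	int nvec = pci_msi_vec_count(pdev);
 *
 *	if (nvec < 0)
 *		return nvec;	// device has no MSI capability
 *	nvec = min(nvec, 8);	// driver-specific upper bound
 */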

/*
 * Architecture override returns true when the PCI MSI message should be
 * written by the generic restore function.
 */
bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	return true;
}

void __pci_restore_msi_state(struct pci_dev *dev)
{
	struct msi_desc *entry;
	u16 control;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	if (arch_restore_msi_irqs(dev))
		__pci_write_msi_msg(entry, &entry->msg);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	pci_msi_update_mask(entry, 0, 0);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE |
		   FIELD_PREP(PCI_MSI_FLAGS_QSIZE, entry->pci.msi_attrib.multiple);
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked, as in its initial state */
	desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
	if (!WARN_ON_ONCE(!desc)) {
		pci_msi_unmask(desc, msi_multi_mask(desc));

		/* Restore dev->irq to its default pin-assertion IRQ */
		dev->irq = desc->pci.msi_attrib.default_irq;
	}
	pcibios_alloc_irq(dev);
}

/* PCI/MSI-X specific functionality */

static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
{
	u16 ctrl;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	ctrl &= ~clear;
	ctrl |= set;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

static void __iomem *msix_map_region(struct pci_dev *dev,
				     unsigned int nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	unsigned long flags;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	flags = pci_resource_flags(dev, bir);
	if (!flags || (flags & IORESOURCE_UNSET))
		return NULL;

	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
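
/*
 * Worked example of the table register decode above: if the MSI-X
 * Table Offset/BIR register reads 0x00002004, the BIR is 4 (low three
 * bits) and the table lives at offset 0x2000 into the resource backing
 * BAR 4.
 */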

/**
 * msix_prepare_msi_desc - Prepare a half initialized MSI descriptor for operation
 * @dev:	The PCI device for which the descriptor is prepared
 * @desc:	The MSI descriptor for preparation
 *
 * This is separate from msix_setup_msi_descs() below to handle dynamic
 * allocations for MSI-X after initial enablement.
 *
 * Ideally the whole MSI-X setup would work that way, but there is no way to
 * support this for the legacy arch_setup_msi_irqs() mechanism and for the
 * fake irq domains like the x86 XEN one. Sigh...
 *
 * The descriptor is zeroed and only @desc::msi_index and @desc::affinity
 * are set. When called from msix_setup_msi_descs() then the is_virtual
 * attribute is initialized as well.
 *
 * Fill in the rest.
 */
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
{
	desc->nvec_used				= 1;
	desc->pci.msi_attrib.is_msix		= 1;
	desc->pci.msi_attrib.is_64		= 1;
	desc->pci.msi_attrib.default_irq	= dev->irq;
	desc->pci.mask_base			= dev->msix_base;

	if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
	    !desc->pci.msi_attrib.is_virtual) {
		void __iomem *addr = pci_msix_desc_addr(desc);

		desc->pci.msi_attrib.can_mask = 1;
		/* Workaround for SUN NIU insanity, which requires write before read */
		if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
			writel(0, addr + PCI_MSIX_ENTRY_DATA);
		desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
	}
}

static int msix_setup_msi_descs(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, struct irq_affinity_desc *masks)
{
	int ret = 0, i, vec_count = pci_msix_vec_count(dev);
	struct irq_affinity_desc *curmsk;
	struct msi_desc desc;

	memset(&desc, 0, sizeof(desc));

	for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
		desc.msi_index = entries ? entries[i].entry : i;
		desc.affinity = masks ? curmsk : NULL;
		desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;

		msix_prepare_msi_desc(dev, &desc);

		ret = msi_insert_msi_desc(&dev->dev, &desc);
		if (ret)
			break;
	}
	return ret;
}

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
	struct msi_desc *desc;

	if (entries) {
		msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
			entries->vector = desc->irq;
			entries++;
		}
	}
}

static void msix_mask_all(void __iomem *base, int tsize)
{
	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
	int i;

	for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}

DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));

static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
				   int nvec, struct irq_affinity_desc *masks)
{
	struct pci_dev *dev __free(free_msi_irqs) = __dev;

	int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
	if (ret)
		return ret;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		return ret;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		return ret;

	msix_update_entries(dev, entries);
	retain_and_null_ptr(dev);
	return 0;
}

static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
				 int nvec, struct irq_affinity *affd)
{
	struct irq_affinity_desc *masks __free(kfree) =
		affd ? irq_create_affinity_masks(nvec, affd) : NULL;

	guard(msi_descs_lock)(&dev->dev);
	return __msix_setup_interrupts(dev, entries, nvec, masks);
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 * @affd: Optional pointer to enable automatic affinity assignment
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X IRQs. A return value of zero indicates
 * successful setup of the requested MSI-X entries with allocated IRQs;
 * a non-zero value indicates an error.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, struct irq_affinity *affd)
{
	int ret, tsize;
	u16 control;

	/*
	 * Some devices require MSI-X to be enabled before the MSI-X
	 * registers can be accessed.  Mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
				    PCI_MSIX_FLAGS_ENABLE);

	/* Mark it enabled so setup functions can query it */
	dev->msix_enabled = 1;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	tsize = msix_table_size(control);
	dev->msix_base = msix_map_region(dev, tsize);
	if (!dev->msix_base) {
		ret = -ENOMEM;
		goto out_disable;
	}

	/*
	 * The error path of __msix_setup_interrupts() already unmaps and
	 * clears dev->msix_base via pci_free_msi_irqs(), so there is
	 * nothing left to unmap here on failure.
	 */
	ret = msix_setup_interrupts(dev, entries, nvec, affd);
	if (ret)
		goto out_disable;

	/* Disable INTX */
	pci_intx_for_msi(dev, 0);

	if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
		/*
		 * Ensure that all table entries are masked to prevent
		 * stale entries from firing in a crash kernel.
		 *
		 * Done late to deal with a broken Marvell NVME device
		 * which takes the MSI-X mask bits into account even
		 * when MSI-X is disabled, which prevents MSI delivery.
		 */
		msix_mask_all(dev->msix_base, tsize);
	}
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_disable:
	dev->msix_enabled = 0;
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);

	return ret;
}

static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	bool nogap;
	int i, j;

	if (!entries)
		return true;

	nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY);

	for (i = 0; i < nvec; i++) {
		/* Check for duplicate entries */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return false;
		}
		/* Check for unsupported gaps */
		if (nogap && entries[i].entry != i)
			return false;
	}
	return true;
}
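
/*
 * For illustration: an entries array of { 0, 5, 2 } is accepted on
 * domains without the contiguous restriction, but rejected when
 * MSI_FLAG_MSIX_CONTIGUOUS is set (only in-order arrays like
 * { 0, 1, 2 } pass). Arrays with duplicates such as { 0, 1, 1 } are
 * always rejected.
 */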

int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
			    int maxvec, struct irq_affinity *affd, int flags)
{
	int hwsize, rc, nvec = maxvec;

	if (maxvec < minvec)
		return -ERANGE;

	if (dev->msi_enabled) {
		pci_info(dev, "can't enable MSI-X (MSI already enabled)\n");
		return -EINVAL;
	}

	if (WARN_ON_ONCE(dev->msix_enabled))
		return -EINVAL;

	/* Check MSI-X early on irq domain enabled architectures */
	if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY))
		return -ENOTSUPP;

	if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
		return -EINVAL;

	hwsize = pci_msix_vec_count(dev);
	if (hwsize < 0)
		return hwsize;

	if (!pci_msix_validate_entries(dev, entries, nvec))
		return -EINVAL;

	if (hwsize < nvec) {
		/* Keep the IRQ virtual hackery working */
		if (flags & PCI_IRQ_VIRTUAL)
			hwsize = nvec;
		else
			nvec = hwsize;
	}

	if (nvec < minvec)
		return -ENOSPC;

	rc = pci_setup_msi_context(dev);
	if (rc)
		return rc;

	if (!pci_setup_msix_device_domain(dev, hwsize))
		return -ENODEV;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = msix_capability_init(dev, entries, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}
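
/*
 * Illustrative driver-side usage (not part of this file): the common
 * path is pci_alloc_irq_vectors(), which lets the core number the
 * vectors 0..n-1 without a struct msix_entry array. The per-queue
 * setup helper below is hypothetical:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 2, 16, PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++)
 *		setup_my_queue(priv, i, pci_irq_vector(pdev, i));
 */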

void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;
	bool write_msg;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	write_msg = arch_restore_msi_irqs(dev);

	scoped_guard (msi_descs_lock, &dev->dev) {
		msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
			if (write_msg)
				__pci_write_msi_msg(entry, &entry->msg);
			pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
		}
	}

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	if (pci_dev_is_disconnected(dev)) {
		dev->msix_enabled = 0;
		return;
	}

	/* Return the device with MSI-X masked, as in its initial state */
	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
		pci_msix_mask(desc);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}

/* Common interfaces */

void pci_free_msi_irqs(struct pci_dev *dev)
{
	pci_msi_teardown_msi_irqs(dev);

	if (dev->msix_base) {
		iounmap(dev->msix_base);
		dev->msix_base = NULL;
	}
}

#ifdef CONFIG_PCIE_TPH
/**
 * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
 * @pdev:	The PCIe device to update
 * @index:	The MSI-X index to update
 * @tag:	The tag to write
 *
 * Returns: 0 on success, error code on failure
 */
int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
{
	struct msi_desc *msi_desc;
	struct irq_desc *irq_desc;
	unsigned int virq;

	if (!pdev->msix_enabled)
		return -ENXIO;

	virq = msi_get_virq(&pdev->dev, index);
	if (!virq)
		return -ENXIO;

	guard(msi_descs_lock)(&pdev->dev);

	/*
	 * This is a horrible hack, but short of implementing a PCI
	 * specific interrupt chip callback and a huge pile of
	 * infrastructure, this is the minor nuisance. It provides the
	 * protection against concurrent operations on this entry and keeps
	 * the control word cache in sync.
	 */
	irq_desc = irq_to_desc(virq);
	if (!irq_desc)
		return -ENXIO;

	guard(raw_spinlock_irq)(&irq_desc->lock);
	msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
	if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
		return -ENXIO;

	msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
	msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
	pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
	/* Flush the write */
	readl(pci_msix_desc_addr(msi_desc));
	return 0;
}
#endif
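
/*
 * Illustrative caller sketch (hypothetical; the real caller lives in
 * the TPH code): update the steering tag of the MSI-X entry backing
 * interrupt @index once a tag has been computed:
 *
 *	ret = pci_msix_write_tph_tag(pdev, index, tag);
 *	if (ret)
 *		return ret;
 */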

/* Misc. infrastructure */

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
	return to_pci_dev(desc->dev);
}
EXPORT_SYMBOL(msi_desc_to_pci_dev);

void pci_no_msi(void)
{
	pci_msi_enable = false;
}