Lines Matching +full:pci +full:- +full:domain

1 // SPDX-License-Identifier: GPL-2.0
9 * PCI compatible and non PCI compatible devices.
21 * alloc_msi_entry - Allocate an initialized msi_entry
38 INIT_LIST_HEAD(&desc->list); in alloc_msi_entry()
39 desc->dev = dev; in alloc_msi_entry()
40 desc->nvec_used = nvec; in alloc_msi_entry()
42 desc->affinity = kmemdup(affinity, in alloc_msi_entry()
43 nvec * sizeof(*desc->affinity), GFP_KERNEL); in alloc_msi_entry()
44 if (!desc->affinity) { in alloc_msi_entry()
55 kfree(entry->affinity); in free_msi_entry()
61 *msg = entry->msg; in __get_cached_msi_msg()
76 data->chip->irq_write_msi_msg(data, msg); in irq_chip_write_msi_msg()
79 static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg) in msi_check_level() argument
81 struct msi_domain_info *info = domain->host_data; in msi_check_level()
85 * not advertised that it is level-capable, signal the breakage. in msi_check_level()
87 WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) && in msi_check_level()
88 (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) && in msi_check_level()
93 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
104 struct irq_data *parent = irq_data->parent_data; in msi_domain_set_affinity()
108 ret = parent->chip->irq_set_affinity(parent, mask, force); in msi_domain_set_affinity()
111 msi_check_level(irq_data->domain, msg); in msi_domain_set_affinity()
118 static int msi_domain_activate(struct irq_domain *domain, in msi_domain_activate() argument
124 msi_check_level(irq_data->domain, msg); in msi_domain_activate()
129 static void msi_domain_deactivate(struct irq_domain *domain, in msi_domain_deactivate() argument
138 static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, in msi_domain_alloc() argument
141 struct msi_domain_info *info = domain->host_data; in msi_domain_alloc()
142 struct msi_domain_ops *ops = info->ops; in msi_domain_alloc()
143 irq_hw_number_t hwirq = ops->get_hwirq(info, arg); in msi_domain_alloc()
146 if (irq_find_mapping(domain, hwirq) > 0) in msi_domain_alloc()
147 return -EEXIST; in msi_domain_alloc()
149 if (domain->parent) { in msi_domain_alloc()
150 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in msi_domain_alloc()
156 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); in msi_domain_alloc()
158 if (ops->msi_free) { in msi_domain_alloc()
159 for (i--; i > 0; i--) in msi_domain_alloc()
160 ops->msi_free(domain, info, virq + i); in msi_domain_alloc()
162 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_alloc()
170 static void msi_domain_free(struct irq_domain *domain, unsigned int virq, in msi_domain_free() argument
173 struct msi_domain_info *info = domain->host_data; in msi_domain_free()
176 if (info->ops->msi_free) { in msi_domain_free()
178 info->ops->msi_free(domain, info, virq + i); in msi_domain_free()
180 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_free()
193 return arg->hwirq; in msi_domain_ops_get_hwirq()
196 static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, in msi_domain_ops_prepare() argument
206 arg->desc = desc; in msi_domain_ops_set_desc()
209 static int msi_domain_ops_init(struct irq_domain *domain, in msi_domain_ops_init() argument
214 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, in msi_domain_ops_init()
215 info->chip_data); in msi_domain_ops_init()
216 if (info->handler && info->handler_name) { in msi_domain_ops_init()
217 __irq_set_handler(virq, info->handler, 0, info->handler_name); in msi_domain_ops_init()
218 if (info->handler_data) in msi_domain_ops_init()
219 irq_set_handler_data(virq, info->handler_data); in msi_domain_ops_init()
224 static int msi_domain_ops_check(struct irq_domain *domain, in msi_domain_ops_check() argument
243 struct msi_domain_ops *ops = info->ops; in msi_domain_update_dom_ops()
246 info->ops = &msi_domain_ops_default; in msi_domain_update_dom_ops()
250 if (ops->domain_alloc_irqs == NULL) in msi_domain_update_dom_ops()
251 ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs; in msi_domain_update_dom_ops()
252 if (ops->domain_free_irqs == NULL) in msi_domain_update_dom_ops()
253 ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs; in msi_domain_update_dom_ops()
255 if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS)) in msi_domain_update_dom_ops()
258 if (ops->get_hwirq == NULL) in msi_domain_update_dom_ops()
259 ops->get_hwirq = msi_domain_ops_default.get_hwirq; in msi_domain_update_dom_ops()
260 if (ops->msi_init == NULL) in msi_domain_update_dom_ops()
261 ops->msi_init = msi_domain_ops_default.msi_init; in msi_domain_update_dom_ops()
262 if (ops->msi_check == NULL) in msi_domain_update_dom_ops()
263 ops->msi_check = msi_domain_ops_default.msi_check; in msi_domain_update_dom_ops()
264 if (ops->msi_prepare == NULL) in msi_domain_update_dom_ops()
265 ops->msi_prepare = msi_domain_ops_default.msi_prepare; in msi_domain_update_dom_ops()
266 if (ops->set_desc == NULL) in msi_domain_update_dom_ops()
267 ops->set_desc = msi_domain_ops_default.set_desc; in msi_domain_update_dom_ops()
272 struct irq_chip *chip = info->chip; in msi_domain_update_chip_ops()
274 BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); in msi_domain_update_chip_ops()
275 if (!chip->irq_set_affinity) in msi_domain_update_chip_ops()
276 chip->irq_set_affinity = msi_domain_set_affinity; in msi_domain_update_chip_ops()
280 * msi_create_irq_domain - Create a MSI interrupt domain
282 * @info: MSI domain info
283 * @parent: Parent irq domain
289 struct irq_domain *domain; in msi_create_irq_domain() local
292 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) in msi_create_irq_domain()
295 domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0, in msi_create_irq_domain()
298 if (domain && !domain->name && info->chip) in msi_create_irq_domain()
299 domain->name = info->chip->name; in msi_create_irq_domain()
301 return domain; in msi_create_irq_domain()
304 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, in msi_domain_prepare_irqs() argument
307 struct msi_domain_info *info = domain->host_data; in msi_domain_prepare_irqs()
308 struct msi_domain_ops *ops = info->ops; in msi_domain_prepare_irqs()
311 ret = ops->msi_check(domain, info, dev); in msi_domain_prepare_irqs()
313 ret = ops->msi_prepare(domain, dev, nvec, arg); in msi_domain_prepare_irqs()
318 int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, in msi_domain_populate_irqs() argument
321 struct msi_domain_info *info = domain->host_data; in msi_domain_populate_irqs()
322 struct msi_domain_ops *ops = info->ops; in msi_domain_populate_irqs()
327 /* Don't even try the multi-MSI brain damage. */ in msi_domain_populate_irqs()
328 if (WARN_ON(!desc->irq || desc->nvec_used != 1)) { in msi_domain_populate_irqs()
329 ret = -EINVAL; in msi_domain_populate_irqs()
333 if (!(desc->irq >= virq && desc->irq < (virq + nvec))) in msi_domain_populate_irqs()
336 ops->set_desc(arg, desc); in msi_domain_populate_irqs()
337 /* Assumes the domain mutex is held! */ in msi_domain_populate_irqs()
338 ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1, in msi_domain_populate_irqs()
343 irq_set_msi_desc_off(desc->irq, 0, desc); in msi_domain_populate_irqs()
349 if (!(desc->irq >= virq && desc->irq < (virq + nvec))) in msi_domain_populate_irqs()
352 irq_domain_free_irqs_common(domain, desc->irq, 1); in msi_domain_populate_irqs()
362 * dummy vector to the device. If the PCI/MSI device does not support
368 * used. For now reservation mode is restricted to PCI/MSI.
370 static bool msi_check_reservation_mode(struct irq_domain *domain, in msi_check_reservation_mode() argument
376 switch(domain->bus_token) { in msi_check_reservation_mode()
384 if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) in msi_check_reservation_mode()
395 return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; in msi_check_reservation_mode()
398 int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, in __msi_domain_alloc_irqs() argument
401 struct msi_domain_info *info = domain->host_data; in __msi_domain_alloc_irqs()
402 struct msi_domain_ops *ops = info->ops; in __msi_domain_alloc_irqs()
409 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); in __msi_domain_alloc_irqs()
414 ops->set_desc(&arg, desc); in __msi_domain_alloc_irqs()
416 virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used, in __msi_domain_alloc_irqs()
418 desc->affinity); in __msi_domain_alloc_irqs()
420 ret = -ENOSPC; in __msi_domain_alloc_irqs()
421 if (ops->handle_error) in __msi_domain_alloc_irqs()
422 ret = ops->handle_error(domain, desc, ret); in __msi_domain_alloc_irqs()
423 if (ops->msi_finish) in __msi_domain_alloc_irqs()
424 ops->msi_finish(&arg, ret); in __msi_domain_alloc_irqs()
428 for (i = 0; i < desc->nvec_used; i++) { in __msi_domain_alloc_irqs()
434 if (ops->msi_finish) in __msi_domain_alloc_irqs()
435 ops->msi_finish(&arg, 0); in __msi_domain_alloc_irqs()
437 can_reserve = msi_check_reservation_mode(domain, info, dev); in __msi_domain_alloc_irqs()
440 virq = desc->irq; in __msi_domain_alloc_irqs()
441 if (desc->nvec_used == 1) in __msi_domain_alloc_irqs()
444 dev_dbg(dev, "irq [%d-%d] for MSI\n", in __msi_domain_alloc_irqs()
445 virq, virq + desc->nvec_used - 1); in __msi_domain_alloc_irqs()
447 * This flag is set by the PCI layer as we need to activate in __msi_domain_alloc_irqs()
448 * the MSI entries before the PCI layer enables MSI in the in __msi_domain_alloc_irqs()
451 if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) in __msi_domain_alloc_irqs()
454 irq_data = irq_domain_get_irq_data(domain, desc->irq); in __msi_domain_alloc_irqs()
457 if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) in __msi_domain_alloc_irqs()
471 irq_data = irq_domain_get_irq_data(domain, desc->irq); in __msi_domain_alloc_irqs()
481 if (desc->irq == virq) in __msi_domain_alloc_irqs()
484 irqd = irq_domain_get_irq_data(domain, desc->irq); in __msi_domain_alloc_irqs()
488 msi_domain_free_irqs(domain, dev); in __msi_domain_alloc_irqs()
493 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
494 * @domain: The domain to allocate from
501 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, in msi_domain_alloc_irqs() argument
504 struct msi_domain_info *info = domain->host_data; in msi_domain_alloc_irqs()
505 struct msi_domain_ops *ops = info->ops; in msi_domain_alloc_irqs()
507 return ops->domain_alloc_irqs(domain, dev, nvec); in msi_domain_alloc_irqs()
510 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) in __msi_domain_free_irqs() argument
520 if (desc->irq) { in __msi_domain_free_irqs()
521 irq_domain_free_irqs(desc->irq, desc->nvec_used); in __msi_domain_free_irqs()
522 desc->irq = 0; in __msi_domain_free_irqs()
528 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
529 * @domain: The domain managing the interrupts
533 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) in msi_domain_free_irqs() argument
535 struct msi_domain_info *info = domain->host_data; in msi_domain_free_irqs()
536 struct msi_domain_ops *ops = info->ops; in msi_domain_free_irqs()
538 return ops->domain_free_irqs(domain, dev); in msi_domain_free_irqs()
542 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
543 * @domain: The interrupt domain to retrieve data from
546 * @domain->host_data.
548 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) in msi_get_domain_info() argument
550 return (struct msi_domain_info *)domain->host_data; in msi_get_domain_info()