Lines matching refs: range

114 static int knav_queue_setup_irq(struct knav_range_info *range,  in knav_queue_setup_irq()  argument
117 unsigned queue = inst->id - range->queue_base; in knav_queue_setup_irq()
120 if (range->flags & RANGE_HAS_IRQ) { in knav_queue_setup_irq()
121 irq = range->irqs[queue].irq; in knav_queue_setup_irq()
126 if (range->irqs[queue].cpu_mask) { in knav_queue_setup_irq()
127 ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); in knav_queue_setup_irq()
129 dev_warn(range->kdev->dev, in knav_queue_setup_irq()
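
The matches at lines 114-129 outline per-queue IRQ setup: the queue index is the instance id offset from the range base, and an optional cpu_mask stored on the range's irqs[] entry is applied as an affinity hint. A minimal sketch of how those fragments fit together; the request_irq() call, the handler name, inst->irq_name and the driver-internal header are assumptions not shown in the listing.

	#include <linux/interrupt.h>	/* request_irq(), irq_set_affinity_hint() */
	#include <linux/device.h>	/* dev_warn() */
	#include "knav_qmss.h"		/* struct knav_range_info, knav_queue_inst (assumed) */

	static irqreturn_t knav_queue_int_handler(int irq, void *_instdata);	/* assumed */

	static int knav_queue_setup_irq(struct knav_range_info *range,
					struct knav_queue_inst *inst)
	{
		unsigned queue = inst->id - range->queue_base;
		int ret = 0, irq;

		if (range->flags & RANGE_HAS_IRQ) {
			irq = range->irqs[queue].irq;
			ret = request_irq(irq, knav_queue_int_handler, 0,
					  inst->irq_name, inst);	/* handler/name assumed */
			if (ret)
				return ret;
			if (range->irqs[queue].cpu_mask) {
				ret = irq_set_affinity_hint(irq,
							    range->irqs[queue].cpu_mask);
				if (ret) {
					dev_warn(range->kdev->dev,
						 "Failed to set IRQ affinity\n");
					return ret;
				}
			}
		}
		return ret;
	}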
140 struct knav_range_info *range = inst->range; in knav_queue_free_irq() local
141 unsigned queue = inst->id - inst->range->queue_base; in knav_queue_free_irq()
144 if (range->flags & RANGE_HAS_IRQ) { in knav_queue_free_irq()
145 irq = range->irqs[queue].irq; in knav_queue_free_irq()
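
Lines 140-145 show the teardown side of the same path. A sketch, assuming the affinity-hint clear and free_irq() mirror the setup above:

	static void knav_queue_free_irq(struct knav_queue_inst *inst)
	{
		struct knav_range_info *range = inst->range;
		unsigned queue = inst->id - inst->range->queue_base;
		int irq;

		if (range->flags & RANGE_HAS_IRQ) {
			irq = range->irqs[queue].irq;
			irq_set_affinity_hint(irq, NULL);	/* drop any hint set at open (assumed) */
			free_irq(irq, inst);
		}
	}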
158 return inst->range->flags & RANGE_RESERVED; in knav_queue_is_reserved()
180 (inst->range->flags & RANGE_HAS_IRQ)) { in knav_queue_match_type()
183 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { in knav_queue_match_type()
186 !(inst->range->flags & in knav_queue_match_type()
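
Lines 158-186 show the range flags also drive queue classification: RANGE_RESERVED marks reserved ranges, and the type match distinguishes IRQ-backed, accumulator and plain general-purpose ranges. A sketch of the matcher; the KNAV_QUEUE_* type constants and the exact control flow are assumptions.

	static bool knav_queue_match_type(struct knav_queue_inst *inst,
					  unsigned type)
	{
		if ((type == KNAV_QUEUE_QPEND) &&
		    (inst->range->flags & RANGE_HAS_IRQ))
			return true;
		if ((type == KNAV_QUEUE_ACC) &&
		    (inst->range->flags & RANGE_HAS_ACCUMULATOR))
			return true;
		if ((type == KNAV_QUEUE_GP) &&
		    !(inst->range->flags &
		      (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
			return true;
		return false;
	}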
242 struct knav_range_info *range = inst->range; in __knav_queue_open() local
245 if (range->ops && range->ops->open_queue) in __knav_queue_open()
246 ret = range->ops->open_queue(range, inst, flags); in __knav_queue_open()
317 struct knav_range_info *range = inst->range; in knav_queue_set_notify() local
319 if (range->ops && range->ops->set_notify) in knav_queue_set_notify()
320 range->ops->set_notify(range, inst, enabled); in knav_queue_set_notify()
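
Lines 242-246, 317-320 and 563-566 all use the same NULL-checked dispatch through the range's ops table. A minimal sketch of the notify variant; everything outside the two matched lines, including the signature, is assumed.

	static int knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
	{
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->set_notify)
			range->ops->set_notify(range, inst, enabled);

		return 0;
	}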
368 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) in knav_queue_set_notifier()
383 static int knav_gp_set_notify(struct knav_range_info *range, in knav_gp_set_notify() argument
389 if (range->flags & RANGE_HAS_IRQ) { in knav_gp_set_notify()
390 queue = inst->id - range->queue_base; in knav_gp_set_notify()
392 enable_irq(range->irqs[queue].irq); in knav_gp_set_notify()
394 disable_irq_nosync(range->irqs[queue].irq); in knav_gp_set_notify()
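
Lines 383-394 show that, for a general-purpose range, the notifier is simply the per-queue interrupt line. A sketch; the enabled parameter and the if/else shape are implied by the enable/disable pair but not shown in full.

	static int knav_gp_set_notify(struct knav_range_info *range,
				      struct knav_queue_inst *inst,
				      bool enabled)
	{
		unsigned queue;

		if (range->flags & RANGE_HAS_IRQ) {
			queue = inst->id - range->queue_base;
			if (enabled)
				enable_irq(range->irqs[queue].irq);
			else
				disable_irq_nosync(range->irqs[queue].irq);
		}
		return 0;
	}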
399 static int knav_gp_open_queue(struct knav_range_info *range, in knav_gp_open_queue() argument
402 return knav_queue_setup_irq(range, inst); in knav_gp_open_queue()
405 static int knav_gp_close_queue(struct knav_range_info *range, in knav_gp_close_queue() argument
563 struct knav_range_info *range = inst->range; in knav_queue_close() local
565 if (range->ops && range->ops->close_queue) in knav_queue_close()
566 range->ops->close_queue(range, inst); in knav_queue_close()
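
The callbacks referenced above (open_queue, close_queue, set_notify at lines 245-246, 319-320, 405 and 565-566, plus init_range, init_queue and free_range further down) suggest the shape of the per-range ops table, and line 1276 installs knav_gp_range_ops for plain ranges. A possible layout; member order, the close path body and any additional members are assumptions.

	struct knav_range_ops {
		int (*init_range)(struct knav_range_info *range);
		int (*free_range)(struct knav_range_info *range);
		int (*init_queue)(struct knav_range_info *range,
				  struct knav_queue_inst *inst);
		int (*open_queue)(struct knav_range_info *range,
				  struct knav_queue_inst *inst, unsigned flags);
		int (*close_queue)(struct knav_range_info *range,
				   struct knav_queue_inst *inst);
		int (*set_notify)(struct knav_range_info *range,
				  struct knav_queue_inst *inst, bool enabled);
	};

	static int knav_gp_open_queue(struct knav_range_info *range,
				      struct knav_queue_inst *inst, unsigned flags)
	{
		return knav_queue_setup_irq(range, inst);
	}

	static int knav_gp_close_queue(struct knav_range_info *range,
				       struct knav_queue_inst *inst)
	{
		knav_queue_free_irq(inst);	/* assumed: mirror of the open path */
		return 0;
	}

	/* General-purpose ranges only need the IRQ-backed callbacks. */
	static struct knav_range_ops knav_gp_range_ops = {
		.set_notify	= knav_gp_set_notify,
		.open_queue	= knav_gp_open_queue,
		.close_queue	= knav_gp_close_queue,
	};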
1212 struct knav_range_info *range; in knav_setup_queue_range() local
1217 range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL); in knav_setup_queue_range()
1218 if (!range) { in knav_setup_queue_range()
1223 range->kdev = kdev; in knav_setup_queue_range()
1224 range->name = knav_queue_find_name(node); in knav_setup_queue_range()
1227 range->queue_base = temp[0] - kdev->base_id; in knav_setup_queue_range()
1228 range->num_queues = temp[1]; in knav_setup_queue_range()
1230 dev_err(dev, "invalid queue range %s\n", range->name); in knav_setup_queue_range()
1231 devm_kfree(dev, range); in knav_setup_queue_range()
1241 range->irqs[i].irq = irq_create_of_mapping(&oirq); in knav_setup_queue_range()
1242 if (range->irqs[i].irq == IRQ_NONE) in knav_setup_queue_range()
1245 range->num_irqs++; in knav_setup_queue_range()
1251 range->irqs[i].cpu_mask = devm_kzalloc(dev, in knav_setup_queue_range()
1253 if (!range->irqs[i].cpu_mask) in knav_setup_queue_range()
1258 cpumask_set_cpu(bit, range->irqs[i].cpu_mask); in knav_setup_queue_range()
1262 range->num_irqs = min(range->num_irqs, range->num_queues); in knav_setup_queue_range()
1263 if (range->num_irqs) in knav_setup_queue_range()
1264 range->flags |= RANGE_HAS_IRQ; in knav_setup_queue_range()
1267 range->flags |= RANGE_RESERVED; in knav_setup_queue_range()
1270 ret = knav_init_acc_range(kdev, node, range); in knav_setup_queue_range()
1272 devm_kfree(dev, range); in knav_setup_queue_range()
1276 range->ops = &knav_gp_range_ops; in knav_setup_queue_range()
1281 start = max(qmgr->start_queue, range->queue_base); in knav_setup_queue_range()
1283 range->queue_base + range->num_queues); in knav_setup_queue_range()
1293 list_add_tail(&range->list, &kdev->queue_ranges); in knav_setup_queue_range()
1295 range->name, range->queue_base, in knav_setup_queue_range()
1296 range->queue_base + range->num_queues - 1, in knav_setup_queue_range()
1297 range->num_irqs, in knav_setup_queue_range()
1298 (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "", in knav_setup_queue_range()
1299 (range->flags & RANGE_RESERVED) ? ", reserved" : "", in knav_setup_queue_range()
1300 (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : ""); in knav_setup_queue_range()
1301 kdev->num_queues_in_use += range->num_queues; in knav_setup_queue_range()
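
Lines 1212-1301 cover range construction from the device tree: allocate the range, read its base/size pair, map one interrupt per queue, derive the flags and hand accumulator ranges to knav_init_acc_range(). A condensed sketch of that flow; the property names ("qrange", "qalloc-by-id", "accumulator"), RANGE_MAX_IRQS and the error codes are assumptions, and the per-IRQ cpu_mask parsing and per-queue-manager clamping at lines 1251-1283 are omitted.

	#include <linux/of.h>
	#include <linux/of_irq.h>	/* of_irq_parse_one(), irq_create_of_mapping() */
	#include <linux/list.h>
	#include "knav_qmss.h"		/* driver-internal types and helpers (assumed) */

	static int knav_setup_queue_range(struct knav_device *kdev,
					  struct device_node *node)
	{
		struct device *dev = kdev->dev;
		struct knav_range_info *range;
		struct of_phandle_args oirq;
		u32 temp[2];
		int ret, i;

		range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
		if (!range)
			return -ENOMEM;

		range->kdev = kdev;
		range->name = knav_queue_find_name(node);

		ret = of_property_read_u32_array(node, "qrange", temp, 2);
		if (!ret) {
			range->queue_base = temp[0] - kdev->base_id;
			range->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid queue range %s\n", range->name);
			devm_kfree(dev, range);
			return -EINVAL;
		}

		/* Map one interrupt per queue, stopping at the first gap. */
		for (i = 0; i < RANGE_MAX_IRQS; i++) {
			if (of_irq_parse_one(node, i, &oirq))
				break;
			range->irqs[i].irq = irq_create_of_mapping(&oirq);
			if (range->irqs[i].irq == IRQ_NONE)
				break;
			range->num_irqs++;
		}

		range->num_irqs = min(range->num_irqs, range->num_queues);
		if (range->num_irqs)
			range->flags |= RANGE_HAS_IRQ;

		if (of_property_read_bool(node, "qalloc-by-id"))
			range->flags |= RANGE_RESERVED;

		if (of_property_read_bool(node, "accumulator")) {
			ret = knav_init_acc_range(kdev, node, range);
			if (ret < 0) {
				devm_kfree(dev, range);
				return ret;
			}
		} else {
			range->ops = &knav_gp_range_ops;
		}

		list_add_tail(&range->list, &kdev->queue_ranges);
		kdev->num_queues_in_use += range->num_queues;
		return 0;
	}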
1310 struct device_node *type, *range; in knav_setup_queue_pools() local
1317 for_each_child_of_node(type, range) { in knav_setup_queue_pools()
1319 knav_setup_queue_range(kdev, range); in knav_setup_queue_pools()
1331 struct knav_range_info *range) in knav_free_queue_range() argument
1333 if (range->ops && range->ops->free_range) in knav_free_queue_range()
1334 range->ops->free_range(range); in knav_free_queue_range()
1335 list_del(&range->list); in knav_free_queue_range()
1336 devm_kfree(kdev->dev, range); in knav_free_queue_range()
1341 struct knav_range_info *range; in knav_free_queue_ranges() local
1344 range = first_queue_range(kdev); in knav_free_queue_ranges()
1345 if (!range) in knav_free_queue_ranges()
1347 knav_free_queue_range(kdev, range); in knav_free_queue_ranges()
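
Lines 1331-1347 show the teardown path: each range gets its free_range callback, is unlinked and released, and the loop drains the list. A sketch; first_queue_range() is an internal helper whose definition is not shown (assumed to return NULL once the list is empty).

	static void knav_free_queue_range(struct knav_device *kdev,
					  struct knav_range_info *range)
	{
		if (range->ops && range->ops->free_range)
			range->ops->free_range(range);
		list_del(&range->list);
		devm_kfree(kdev->dev, range);
	}

	static void knav_free_queue_ranges(struct knav_device *kdev)
	{
		struct knav_range_info *range;

		for (;;) {
			range = first_queue_range(kdev);
			if (!range)
				break;
			knav_free_queue_range(kdev, range);
		}
	}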
1712 struct knav_range_info *range, in knav_queue_init_queue() argument
1723 inst->range = range; in knav_queue_init_queue()
1729 if (range->ops && range->ops->init_queue) in knav_queue_init_queue()
1730 return range->ops->init_queue(range, inst); in knav_queue_init_queue()
1737 struct knav_range_info *range; in knav_queue_init_queues() local
1753 for_each_queue_range(kdev, range) { in knav_queue_init_queues()
1754 if (range->ops && range->ops->init_range) in knav_queue_init_queues()
1755 range->ops->init_range(range); in knav_queue_init_queues()
1757 for (id = range->queue_base; in knav_queue_init_queues()
1758 id < range->queue_base + range->num_queues; id++, idx++) { in knav_queue_init_queues()
1759 ret = knav_queue_init_queue(kdev, range, in knav_queue_init_queues()
1764 range->queue_base_inst = in knav_queue_init_queues()
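
Lines 1712-1764 show initialization walking every range, calling init_range once, then creating one instance per queue id and recording the range's first instance. A sketch of that loop; knav_queue_idx_to_inst() and the instance fields other than inst->range are assumptions.

	static int knav_queue_init_queue(struct knav_device *kdev,
					 struct knav_range_info *range,
					 struct knav_queue_inst *inst, unsigned id)
	{
		inst->kdev = kdev;	/* back-pointer assumed */
		inst->range = range;
		inst->id = id;

		if (range->ops && range->ops->init_queue)
			return range->ops->init_queue(range, inst);
		return 0;
	}

	static int knav_queue_init_queues(struct knav_device *kdev)
	{
		struct knav_range_info *range;
		int idx = 0, base_idx, id, ret;

		for_each_queue_range(kdev, range) {
			if (range->ops && range->ops->init_range)
				range->ops->init_range(range);

			base_idx = idx;
			for (id = range->queue_base;
			     id < range->queue_base + range->num_queues; id++, idx++) {
				ret = knav_queue_init_queue(kdev, range,
						knav_queue_idx_to_inst(kdev, idx), id);
				if (ret < 0)
					return ret;
			}
			range->queue_base_inst =
				knav_queue_idx_to_inst(kdev, base_idx);
		}
		return 0;
	}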