Searched refs:queues (Results 1 – 25 of 421) sorted by relevance

/linux/tools/testing/selftests/drivers/net/
queues.py
16 folders = glob.glob(f'/sys/class/net/{ifname}/queues/{qtype}-*')
21 queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
22 if queues:
23 return len([q for q in queues if q['type'] == qtype])
40 queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
41 if not queues:
42 raise KsftSkipEx("Netlink reports no queues")
44 for q in queues:
65 queues = nl_get_queues(cfg, snl, qtype)
66 if not queues:
[all...]
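
The selftest above counts queues two ways: by globbing the per-queue sysfs directories and by dumping queue objects over netlink. A minimal standalone sketch of the sysfs approach, assuming only a plain Python interpreter (no ksft framework; count_queues is a hypothetical helper):

    import glob

    def count_queues(ifname: str, qtype: str) -> int:
        """Count queue directories under /sys/class/net/<ifname>/queues/."""
        # Each queue appears as a directory named <qtype>-<index>, e.g. tx-0.
        return len(glob.glob(f'/sys/class/net/{ifname}/queues/{qtype}-*'))

    print(count_queues('eth0', 'rx'), count_queues('eth0', 'tx'))
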
stats.py
128 queues = NetdevFamily(recv_size=4096).qstats_get({"scope": "queue"}, dump=True)
131 for entry in queues:
137 for ifindex, queues in parsed.items():
139 ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
141 ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
236 # Max out the queues, we'll flip between max and 1
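
The checks at lines 139-141 of stats.py assert that per-queue stats report each queue exactly once, with ids forming a contiguous 0..N-1 range. The same invariant in isolation (check_queue_ids is a hypothetical helper, outside the ksft framework):

    def check_queue_ids(queue_ids: list[int]) -> None:
        """Queue ids must be unique and cover 0..N-1 with no gaps."""
        assert len(queue_ids) == len(set(queue_ids)), "duplicate queue ids"
        assert len(queue_ids) == max(queue_ids) + 1, "queue ids not contiguous"

    check_queue_ids([0, 1, 2, 3])   # passes
    # check_queue_ids([0, 2])       # fails: id 1 is missing
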
/linux/drivers/gpu/drm/imagination/
pvr_context.c
162 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
163 * @ctx: Context to destroy queues on.
166 * It releases all resources attached to the queues bound to this context.
172 pvr_queue_destroy(ctx->queues.fragment); in pvr_context_destroy_queues()
173 pvr_queue_destroy(ctx->queues.geometry); in pvr_context_destroy_queues()
176 pvr_queue_destroy(ctx->queues.compute); in pvr_context_destroy_queues()
179 pvr_queue_destroy(ctx->queues.transfer); in pvr_context_destroy_queues()
185 * pvr_context_create_queues() - Create all queues attached to a context.
186 * @ctx: Context to create queues on.
202 ctx->queues in pvr_context_create_queues()
[all...]
pvr_queue.c
533 job->ctx->queues.fragment); in pvr_queue_prepare_job()
574 lockdep_assert_held(&pvr_dev->queues.lock); in pvr_queue_update_active_state_locked()
584 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
586 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
606 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
608 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
740 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
741 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
775 /* Make sure we CPU-signal the UFO object, so other queues don't get in pvr_queue_start()
829 mutex_lock(&pvr_dev->queues in pvr_queue_timedout_job()
[all...]
pvr_context.h
64 * @faulty: Set to 1 when the context queues had unfinished job when
72 /** @queues: Union containing all kind of queues. */
87 } queues; member
98 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL; in pvr_context_get_queue_for_job()
100 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL; in pvr_context_get_queue_for_job()
102 return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL; in pvr_context_get_queue_for_job()
104 return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL; in pvr_context_get_queue_for_job()
/linux/net/sched/
sch_multiq.c
25 struct Qdisc **queues; member
54 return q->queues[0]; in multiq_classify()
56 return q->queues[band]; in multiq_classify()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
137 qdisc = q->queues[curband]; in multiq_peek()
154 qdisc_reset(q->queues[band]); in multiq_reset()
166 qdisc_put(q->queues[band]); in multiq_destroy()
168 kfree(q->queues); in multiq_destroy()
196 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
197 struct Qdisc *child = q->queues[ in multiq_tune()
[all...]
sch_prio.c
26 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
57 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 return q->queues[q->prio2band[0]]; in prio_classify()
65 return q->queues[band]; in prio_classify()
103 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
117 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
137 qdisc_reset(q->queues[prio]); in prio_reset()
173 qdisc_put(q->queues[prio]); in prio_destroy()
180 struct Qdisc *queues[TCQ_PRIO_BANDS]; in prio_tune() local
198 queues[ in prio_tune()
[all...]
/linux/drivers/nvme/target/
loop.c
30 struct nvme_loop_queue *queues; member
73 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
177 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
198 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
222 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
242 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
266 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) in nvme_loop_destroy_admin_queue()
275 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
276 nvmet_cq_put(&ctrl->queues[0].nvme_cq); in nvme_loop_destroy_admin_queue()
293 kfree(ctrl->queues); in nvme_loop_free_ctrl()
[all...]
/linux/Documentation/networking/
multi-pf-netdev.rst
63 Each combined channel works against one specific PF, creating all its datapath queues against it. We
126 that is capable of pointing to the receive queues of a different PF.
142 - /sys/class/net/eth2/queues/tx-0/xps_cpus:000001
143 - /sys/class/net/eth2/queues/tx-1/xps_cpus:001000
144 - /sys/class/net/eth2/queues/tx-2/xps_cpus:000002
145 - /sys/class/net/eth2/queues/tx-3/xps_cpus:002000
146 - /sys/class/net/eth2/queues/tx-4/xps_cpus:000004
147 - /sys/class/net/eth2/queues/tx-5/xps_cpus:004000
148 - /sys/class/net/eth2/queues/tx-6/xps_cpus:000008
149 - /sys/class/net/eth2/queues/t
[all...]
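
The xps_cpus values above are hexadecimal CPU bitmasks, one per tx queue: 000001 pins a queue to CPU 0, 001000 to CPU 12. A sketch of writing such a mask from Python (set_xps_cpus is a hypothetical helper; writing sysfs attributes requires root):

    def set_xps_cpus(ifname: str, txq: int, cpu_mask: int) -> None:
        """Write a hex CPU bitmask to one tx queue's xps_cpus attribute."""
        path = f'/sys/class/net/{ifname}/queues/tx-{txq}/xps_cpus'
        with open(path, 'w') as f:
            f.write(f'{cpu_mask:x}')

    # Pin tx-0 to CPU 0 and tx-1 to CPU 12, matching the listing above:
    # set_xps_cpus('eth2', 0, 1 << 0)    # 000001
    # set_xps_cpus('eth2', 1, 1 << 12)   # 001000
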
tc-queue-filters.rst
7 TC can be used for directing traffic to either a set of queues or
12 1) TC filter directing traffic to a set of queues is achieved
14 the priority maps to a traffic class (set of queues) when
23 queues and/or a single queue are supported as below:
25 1) TC flower filter directs incoming traffic to a set of queues using
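
The first case, mapping a priority to a traffic class that owns a set of queues, can be modeled in a few lines (a toy lookup only; in the kernel the equivalent tables sit behind netdev_get_prio_tc_map() and the per-TC txq offset/count pairs):

    def classify(priority: int, prio_tc_map: list[int],
                 tc_txq: list[tuple[int, int]]) -> range:
        """Toy mqprio-style lookup: priority -> traffic class -> queue range."""
        tc = prio_tc_map[priority]
        offset, count = tc_txq[tc]            # each TC owns (offset, count) queues
        return range(offset, offset + count)

    # Priorities 0-1 -> TC0 (queues 0-1), priorities 2-3 -> TC1 (queues 2-3):
    print(classify(3, [0, 0, 1, 1], [(0, 2), (2, 2)]))   # range(2, 4)
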
multiqueue.rst
18 the subqueue memory, as well as netdev configuration of where the queues
21 The base driver will also need to manage the queues as it does the global
33 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
35 bands and queues based on the value in skb->queue_mapping. Use this field in
42 On qdisc load, the number of bands is based on the number of queues on the
56 The qdisc will allocate the number of bands to equal the number of queues that
58 queues, the band mapping would look like::
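
The behavior described above, enqueue by skb->queue_mapping and round-robin dequeue across bands, fits in a short toy model (not the kernel qdisc API; skbs are stand-in objects):

    from collections import deque

    class MultiqModel:
        """Toy sch_multiq: enqueue by queue_mapping, round-robin dequeue."""
        def __init__(self, nbands: int):
            self.bands = [deque() for _ in range(nbands)]
            self.curband = 0

        def enqueue(self, skb, queue_mapping: int) -> None:
            self.bands[queue_mapping % len(self.bands)].append(skb)

        def dequeue(self):
            # Scan all bands starting at curband, like multiq_dequeue().
            for i in range(len(self.bands)):
                band = (self.curband + i) % len(self.bands)
                if self.bands[band]:
                    self.curband = (band + 1) % len(self.bands)
                    return self.bands[band].popleft()
            return None
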
/linux/drivers/net/wireless/silabs/wfx/
queue.c
233 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; in wfx_tx_queues_get_skb() local
239 /* sort the queues */ in wfx_tx_queues_get_skb()
243 WARN_ON(num_queues >= ARRAY_SIZE(queues)); in wfx_tx_queues_get_skb()
244 queues[num_queues] = &wvif->tx_queue[i]; in wfx_tx_queues_get_skb()
246 if (wfx_tx_queue_get_weight(queues[j]) < in wfx_tx_queues_get_skb()
247 wfx_tx_queue_get_weight(queues[j - 1])) in wfx_tx_queues_get_skb()
248 swap(queues[j - 1], queues[j]); in wfx_tx_queues_get_skb()
256 skb = skb_dequeue(&queues[i]->offchan); in wfx_tx_queues_get_skb()
264 atomic_inc(&queues[ in wfx_tx_queues_get_skb()
[all...]
/linux/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
56 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
65 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
73 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
82 What: /sys/class/net/<iface>/queues/t
[all...]
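
A short sketch that walks a few of the attributes documented above for one interface (the attribute list is abridged; rx-* and tx-* queue directories expose different subsets, hence the existence check):

    import os

    def dump_queue_attrs(ifname: str) -> None:
        """Print selected per-queue sysfs attributes for an interface."""
        base = f'/sys/class/net/{ifname}/queues'
        for qdir in sorted(os.listdir(base)):
            for attr in ('rps_cpus', 'rps_flow_cnt', 'tx_maxrate', 'xps_cpus'):
                path = os.path.join(base, qdir, attr)
                if os.path.exists(path):
                    with open(path) as f:
                        print(f'{qdir}/{attr} = {f.read().strip()}')
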
/linux/Documentation/gpu/amdgpu/gc/
mes.rst
12 If not, check :ref:`GFX, Compute, and SDMA Overall Behavior<pipes-and-queues-description>`
15 Every GFX has a pipe component with one or more hardware queues. Pipes can
16 switch between queues depending on certain conditions, and one of the
26 queues. The CS IOCTL takes the command buffer from the applications and
29 2. User Queues: These queues are dynamically mapped to the HQDs. Regarding the
31 queues and submit work directly to its user queues with no need to IOCTL for
35 more MQDs than HQDs, the MES firmware will preempt other user queues to make
36 sure each queue gets a time slice; in other words, MES is a microcontroller
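
The oversubscription described here, more MQDs than HQDs with MES time-slicing queues onto the hardware slots, can be illustrated with a toy round-robin model (purely illustrative; the real scheduling policy is firmware-defined):

    from itertools import cycle

    def time_slice(mqds: list[str], num_hqds: int, nslices: int) -> None:
        """Rotate more software queues (MQDs) than hardware slots (HQDs)."""
        rotation = cycle(mqds)
        for t in range(nslices):
            print(f'slice {t}:', [next(rotation) for _ in range(num_hqds)])

    time_slice(['q0', 'q1', 'q2', 'q3', 'q4'], num_hqds=2, nslices=3)
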
/linux/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
53 /* Protects the m2m vb2 queues */
59 } queues; member
86 return &ctx->queues.out; in mxc_isi_m2m_ctx_qdata()
88 return &ctx->queues.cap; in mxc_isi_m2m_ctx_qdata()
113 src_vbuf->sequence = ctx->queues.out.sequence++; in mxc_isi_m2m_frame_write_done()
114 dst_vbuf->sequence = ctx->queues.cap.sequence++; in mxc_isi_m2m_frame_write_done()
136 .width = ctx->queues.out.format.width, in mxc_isi_m2m_device_run()
137 .height = ctx->queues.out.format.height, in mxc_isi_m2m_device_run()
140 .width = ctx->queues.cap.format.width, in mxc_isi_m2m_device_run()
141 .height = ctx->queues in mxc_isi_m2m_device_run()
[all...]
/linux/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
9 management of the packet queues. Packets are queued/de-queued by writing or
32 -- managed-queues : the actual queues managed by each queue manager
33 instance, specified as <"base queue #" "# of queues">.
51 - qpend : pool of qpend(interruptible) queues
52 - general-purpose : pool of general queues, primarily used
53 as free descriptor queues or the
54 transmit DMA queues.
55 - accumulator : pool of queues on PDSP accumulator channel
57 -- qrange : number of queues t
[all...]
/linux/sound/virtio/
virtio_card.h
47 * @queues: Virtqueue wrappers.
64 struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX]; member
86 return &snd->queues[VIRTIO_SND_VQ_CONTROL]; in virtsnd_control_queue()
92 return &snd->queues[VIRTIO_SND_VQ_EVENT]; in virtsnd_event_queue()
98 return &snd->queues[VIRTIO_SND_VQ_TX]; in virtsnd_tx_queue()
104 return &snd->queues[VIRTIO_SND_VQ_RX]; in virtsnd_rx_queue()
/linux/Documentation/networking/device_drivers/ethernet/ti/
cpsw.rst
26 - TX queues must be rated starting from txq0 that has highest priority
28 - CBS shapers should be used with rated queues
30 potential incoming rate, thus, rate of all incoming tx queues has
150 // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
156 // Check if num of queues is set correctly:
172 // TX queues must be rated starting from 0, so set bws for tx0 and tx1
175 // Leave last 2 tx queues not rated.
176 $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
177 $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
181 // Check maximum rate of tx (cpdma) queues
[all...]
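
The echo commands above write a per-queue maximum rate in Mbps. The same configuration from Python (rate_tx_queues is a hypothetical helper; requires root, and queues must be rated starting from tx-0 as the driver notes require):

    def rate_tx_queues(ifname: str, rates_mbps: list[int]) -> None:
        """Set tx_maxrate (Mbps) for tx-0, tx-1, ... in order."""
        for i, rate in enumerate(rates_mbps):
            path = f'/sys/class/net/{ifname}/queues/tx-{i}/tx_maxrate'
            with open(path, 'w') as f:
                f.write(str(rate))

    # rate_tx_queues('eth0', [40, 20])   # tx-0 at 40 Mbps, tx-1 at 20 Mbps
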
/linux/drivers/target/
target_core_tmr.c
118 flush_work(&dev->queues[i].sq.work); in core_tmr_abort_task()
120 spin_lock_irqsave(&dev->queues[i].lock, flags); in core_tmr_abort_task()
121 list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list, in core_tmr_abort_task()
148 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_abort_task()
163 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_abort_task()
301 flush_work(&dev->queues[i].sq.work); in core_tmr_drain_state_list()
303 spin_lock_irqsave(&dev->queues[i].lock, flags); in core_tmr_drain_state_list()
304 list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list, in core_tmr_drain_state_list()
333 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_drain_state_list()
/linux/tools/perf/util/
intel-bts.c
46 struct auxtrace_queues queues; member
211 for (i = 0; i < bts->queues.nr_queues; i++) { in intel_bts_setup_queues()
212 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], in intel_bts_setup_queues()
222 if (bts->queues.new_data) { in intel_bts_update_queues()
223 bts->queues.new_data = false; in intel_bts_update_queues()
467 queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; in intel_bts_process_queue()
541 struct auxtrace_queues *queues = &bts->queues; in intel_bts_process_tid_exit() local
544 for (i = 0; i < queues->nr_queues; i++) { in intel_bts_process_tid_exit()
545 struct auxtrace_queue *queue = &bts->queues in intel_bts_process_tid_exit()
712 struct auxtrace_queues *queues = &bts->queues; intel_bts_free_events() local
[all...]
s390-cpumsf.c
47 * To sort the queues in chronological order, all queue access is controlled
54 * After the auxtrace infrastructure has been setup, the auxtrace queues are
61 * record sample, the auxtrace queues will be processed. As auxtrace queues
170 struct auxtrace_queues queues; member
203 if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu) in s390_cpumcf_dumpctr()
206 q = &sf->queues.queue_array[sample->cpu]; in s390_cpumcf_dumpctr()
703 queue = &sfq->sf->queues.queue_array[sfq->queue_nr]; in s390_cpumsf_run_decoder()
827 for (i = 0; i < sf->queues.nr_queues; i++) { in s390_cpumsf_setup_queues()
828 ret = s390_cpumsf_setup_queue(sf, &sf->queues in s390_cpumsf_setup_queues()
1013 struct auxtrace_queues *queues = &sf->queues; s390_cpumsf_free_queues() local
[all...]
/linux/Documentation/arch/arm/keystone/
knav-qmss.rst
15 management of the packet queues. Packets are queued/de-queued by writing or
24 knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
31 Accumulator QMSS queues using PDSP firmware
34 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
37 1 or 32 queues per channel. More description on the firmware is available in
56 Use of accumulated queues requires the firmware image to be present in the
57 file system. The driver doesn't acc queues to the supported queue range if
/linux/include/linux/
ptr_ring.h
626 void ***queues; in ptr_ring_resize_multiple_bh_noprof() local
629 queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp); in ptr_ring_resize_multiple_bh_noprof()
630 if (!queues) in ptr_ring_resize_multiple_bh_noprof()
634 queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp); in ptr_ring_resize_multiple_bh_noprof()
635 if (!queues[i]) in ptr_ring_resize_multiple_bh_noprof()
642 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple_bh_noprof()
649 kvfree(queues[i]); in ptr_ring_resize_multiple_bh_noprof()
651 kfree(queues); in ptr_ring_resize_multiple_bh_noprof()
[all...]
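
The excerpt shows the resize-multiple pattern: allocate every replacement queue before touching any ring, then swap each in and free the displaced queues, so a failed allocation can never leave the set half-resized. A toy Python analogue of that ordering (not the kernel API; rings are dicts, and the entry migration done by __ptr_ring_swap_queue() is omitted):

    def resize_multiple(rings: list[dict], size: int) -> None:
        """Allocate all new queues first, then swap and free the old ones."""
        queues = [[None] * size for _ in rings]   # any failure raises before swaps
        old = []
        for ring, new_queue in zip(rings, queues):
            old.append(ring['queue'])             # swap step
            ring['queue'] = new_queue
        old.clear()                               # kvfree() analogue

    rings = [{'queue': [None] * 4}, {'queue': [None] * 4}]
    resize_multiple(rings, 8)
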
/linux/Documentation/block/
blk-mq.rst
37 spawns multiple queues with individual entry points local to the CPU, removing
49 blk-mq has two group of queues: software staging queues and hardware dispatch
50 queues. When the request arrives at the block layer, it will try the shortest
56 Then, after the requests are processed by software queues, they will be placed
62 Software staging queues
65 The block IO subsystem adds requests in the software staging queues
71 the number of queues is defined by a per-CPU or per-node basis.
93 requests from different queues, otherwise there would be cache trashing and a
99 queue (a.k.a. run the hardware queue), the software queues mappe
84 IO Schedulers / Software staging queues global() argument
[all...]
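
The two-level design described above, per-CPU software staging queues flushed into a smaller set of hardware dispatch queues, can be sketched as a toy model (mapping and dispatch policy are deliberately simplified):

    class BlkMqModel:
        """Toy blk-mq: per-CPU staging queues feed hardware dispatch queues."""
        def __init__(self, num_cpus: int, num_hw: int):
            self.sw = [[] for _ in range(num_cpus)]
            self.hw = [[] for _ in range(num_hw)]
            self.map = [cpu % num_hw for cpu in range(num_cpus)]

        def submit(self, cpu: int, request: str) -> None:
            self.sw[cpu].append(request)    # stage locally, no cross-CPU contention

        def run_hw_queues(self) -> None:
            for cpu, staged in enumerate(self.sw):
                self.hw[self.map[cpu]].extend(staged)   # flush staging -> dispatch
                staged.clear()
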
/linux/drivers/vdpa/alibaba/
eni_vdpa.c
45 int queues; member
118 for (i = 0; i < eni_vdpa->queues; i++) { in eni_vdpa_free_irq()
164 int queues = eni_vdpa->queues; in eni_vdpa_request_irq() local
165 int vectors = queues + 1; in eni_vdpa_request_irq()
177 for (i = 0; i < queues; i++) { in eni_vdpa_request_irq()
195 irq = pci_irq_vector(pdev, queues); in eni_vdpa_request_irq()
202 vp_legacy_config_vector(ldev, queues); in eni_vdpa_request_irq()
500 eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa); in eni_vdpa_probe()
502 eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues, in eni_vdpa_probe()
[all...]
