
Searched refs:queue (Results 1 – 25 of 1620) sorted by relevance


/linux/drivers/usb/gadget/function/
uvc_queue.c
27 * Video buffers queue management.
33 * the videobuf2 queue operations by serializing calls to videobuf2 and a
34 * spinlock to protect the IRQ queue that holds the buffers to be processed by
39 * videobuf2 queue operations
46 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
47 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup()
63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
64 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_buffer_prepare()
100 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_queue() local
127 uvcg_queue_init(struct uvc_video_queue * queue,struct device * dev,enum v4l2_buf_type type,struct mutex * lock) uvcg_queue_init() argument
165 uvcg_free_buffers(struct uvc_video_queue * queue) uvcg_free_buffers() argument
173 uvcg_alloc_buffers(struct uvc_video_queue * queue,struct v4l2_requestbuffers * rb) uvcg_alloc_buffers() argument
183 uvcg_query_buffer(struct uvc_video_queue * queue,struct v4l2_buffer * buf) uvcg_query_buffer() argument
188 uvcg_queue_buffer(struct uvc_video_queue * queue,struct v4l2_buffer * buf) uvcg_queue_buffer() argument
197 uvcg_dequeue_buffer(struct uvc_video_queue * queue,struct v4l2_buffer * buf,int nonblocking) uvcg_dequeue_buffer() argument
209 uvcg_queue_poll(struct uvc_video_queue * queue,struct file * file,poll_table * wait) uvcg_queue_poll() argument
215 uvcg_queue_mmap(struct uvc_video_queue * queue,struct vm_area_struct * vma) uvcg_queue_mmap() argument
226 uvcg_queue_get_unmapped_area(struct uvc_video_queue * queue,unsigned long pgoff) uvcg_queue_get_unmapped_area() argument
245 uvcg_queue_cancel(struct uvc_video_queue * queue,int disconnect) uvcg_queue_cancel() argument
289 uvcg_queue_enable(struct uvc_video_queue * queue,int enable) uvcg_queue_enable() argument
324 uvcg_complete_buffer(struct uvc_video_queue * queue,struct uvc_buffer * buf) uvcg_complete_buffer() argument
343 uvcg_queue_head(struct uvc_video_queue * queue) uvcg_queue_head() argument
[all...]
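
The uvc_queue.c header comment excerpted above describes a two-lock design: a mutex serializes the videobuf2 queue operations while a spinlock protects the IRQ-side list of buffers waiting to be processed. Below is a minimal userspace sketch of that locking split; the names (video_queue, vbuf) are hypothetical and not the UVC gadget API, and a pthread spinlock stands in for the IRQ-safe spinlock.

/* Sketch of the locking split described above: slow-path buffer
 * submission takes a mutex, while the hot completion path only takes
 * the spinlock guarding the pending list. Illustrative userspace
 * code, not the UVC gadget driver. */
#include <pthread.h>
#include <stddef.h>

struct vbuf {
        struct vbuf *next;
        void *data;
        size_t len;
};

struct video_queue {
        pthread_mutex_t mutex;      /* serializes slow-path queue ops */
        pthread_spinlock_t irqlock; /* protects the pending buffer list */
        struct vbuf *pending_head, *pending_tail;
};

static void video_queue_init(struct video_queue *q)
{
        pthread_mutex_init(&q->mutex, NULL);
        pthread_spin_init(&q->irqlock, PTHREAD_PROCESS_PRIVATE);
        q->pending_head = q->pending_tail = NULL;
}

/* Submission path: serialized by the mutex, briefly takes the
 * spinlock to append the buffer to the "IRQ" list. */
static void queue_buffer(struct video_queue *q, struct vbuf *buf)
{
        pthread_mutex_lock(&q->mutex);
        buf->next = NULL;
        pthread_spin_lock(&q->irqlock);
        if (q->pending_tail)
                q->pending_tail->next = buf;
        else
                q->pending_head = buf;
        q->pending_tail = buf;
        pthread_spin_unlock(&q->irqlock);
        pthread_mutex_unlock(&q->mutex);
}

/* Completion ("IRQ") path: takes only the spinlock. */
static struct vbuf *next_pending_buffer(struct video_queue *q)
{
        struct vbuf *buf;

        pthread_spin_lock(&q->irqlock);
        buf = q->pending_head;
        if (buf) {
                q->pending_head = buf->next;
                if (!q->pending_head)
                        q->pending_tail = NULL;
        }
        pthread_spin_unlock(&q->irqlock);
        return buf;
}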
/linux/drivers/md/dm-vdo/
funnel-workqueue.c
15 #include "funnel-queue.h"
28 * DOC: Work queue definition.
36 /* Name of just the work queue (e.g., "cpuQ12") */
73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) in as_simple_work_queue() argument
75 return ((queue == NULL) ? in as_simple_work_queue()
76 NULL : container_of(queue, struct simple_work_queue, common)); in as_simple_work_queue()
79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) in as_round_robin_work_queue() argument
81 return ((queue == NULL) ? in as_round_robin_work_queue()
83 container_of(queue, struct round_robin_work_queue, common)); in as_round_robin_work_queue()
96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) in poll_for_completion() argument
110 enqueue_work_queue_completion(struct simple_work_queue * queue,struct vdo_completion * completion) enqueue_work_queue_completion() argument
153 run_start_hook(struct simple_work_queue * queue) run_start_hook() argument
159 run_finish_hook(struct simple_work_queue * queue) run_finish_hook() argument
174 wait_for_next_completion(struct simple_work_queue * queue) wait_for_next_completion() argument
222 process_completion(struct simple_work_queue * queue,struct vdo_completion * completion) process_completion() argument
233 service_work_queue(struct simple_work_queue * queue) service_work_queue() argument
263 struct simple_work_queue *queue = ptr; work_queue_runner() local
272 free_simple_work_queue(struct simple_work_queue * queue) free_simple_work_queue() argument
282 free_round_robin_work_queue(struct round_robin_work_queue * queue) free_round_robin_work_queue() argument
297 vdo_free_work_queue(struct vdo_work_queue * queue) vdo_free_work_queue() argument
316 struct simple_work_queue *queue; make_simple_work_queue() local
385 struct round_robin_work_queue *queue; vdo_make_work_queue() local
443 finish_simple_work_queue(struct simple_work_queue * queue) finish_simple_work_queue() argument
453 finish_round_robin_work_queue(struct round_robin_work_queue * queue) finish_round_robin_work_queue() argument
464 vdo_finish_work_queue(struct vdo_work_queue * queue) vdo_finish_work_queue() argument
477 dump_simple_work_queue(struct simple_work_queue * queue) dump_simple_work_queue() argument
498 vdo_dump_work_queue(struct vdo_work_queue * queue) vdo_dump_work_queue() argument
555 vdo_enqueue_work_queue(struct vdo_work_queue * queue,struct vdo_completion * completion) vdo_enqueue_work_queue() argument
611 struct simple_work_queue *queue = get_current_thread_work_queue(); vdo_get_current_work_queue() local
616 vdo_get_work_queue_owner(struct vdo_work_queue * queue) vdo_get_work_queue_owner() argument
628 struct simple_work_queue *queue = get_current_thread_work_queue(); vdo_get_work_queue_private_data() local
633 vdo_work_queue_type_is(struct vdo_work_queue * queue,const struct vdo_work_queue_type * type) vdo_work_queue_type_is() argument
[all...]
funnel-queue.c
6 #include "funnel-queue.h"
15 struct funnel_queue *queue; in vdo_make_funnel_queue() local
17 result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue); in vdo_make_funnel_queue()
22 * Initialize the stub entry and put it in the queue, establishing the invariant that in vdo_make_funnel_queue()
23 * queue->newest and queue->oldest are never null. in vdo_make_funnel_queue()
25 queue->stub.next = NULL; in vdo_make_funnel_queue()
26 queue->newest = &queue in vdo_make_funnel_queue()
33 vdo_free_funnel_queue(struct funnel_queue * queue) vdo_free_funnel_queue() argument
38 get_oldest(struct funnel_queue * queue) get_oldest() argument
103 vdo_funnel_queue_poll(struct funnel_queue * queue) vdo_funnel_queue_poll() argument
137 vdo_is_funnel_queue_empty(struct funnel_queue * queue) vdo_is_funnel_queue_empty() argument
148 vdo_is_funnel_queue_idle(struct funnel_queue * queue) vdo_is_funnel_queue_idle() argument
[all...]
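
The funnel-queue.c excerpt above states the core invariant: a permanent stub entry keeps queue->newest and queue->oldest non-NULL, producers publish by atomically exchanging themselves into newest, and a single consumer walks from oldest. Below is a minimal userspace sketch of that stub-node multi-producer/single-consumer scheme using C11 atomics; the names are illustrative and this is not the dm-vdo API.

/* Stub-node MPSC queue sketch: many producers, one consumer.
 * "newest" is where producers atomically exchange themselves in;
 * "oldest" is touched only by the single consumer. Not dm-vdo code. */
#include <stdatomic.h>
#include <stddef.h>

struct mpsc_entry {
        _Atomic(struct mpsc_entry *) next;
};

struct mpsc_queue {
        _Atomic(struct mpsc_entry *) newest; /* producer end */
        struct mpsc_entry *oldest;           /* consumer end */
        struct mpsc_entry stub;              /* keeps both ends non-NULL */
};

static void mpsc_init(struct mpsc_queue *q)
{
        atomic_store(&q->stub.next, NULL);
        atomic_store(&q->newest, &q->stub);
        q->oldest = &q->stub;
}

/* Multi-producer enqueue: claim the tail with an exchange, then link. */
static void mpsc_put(struct mpsc_queue *q, struct mpsc_entry *e)
{
        struct mpsc_entry *prev;

        atomic_store(&e->next, NULL);
        prev = atomic_exchange(&q->newest, e);
        atomic_store(&prev->next, e);
}

/* Single-consumer poll. May return NULL transiently while a producer
 * has swapped "newest" but has not yet linked its "next" pointer. */
static struct mpsc_entry *mpsc_poll(struct mpsc_queue *q)
{
        struct mpsc_entry *oldest = q->oldest;
        struct mpsc_entry *next = atomic_load(&oldest->next);

        if (oldest == &q->stub) {            /* skip over the stub */
                if (next == NULL)
                        return NULL;
                q->oldest = oldest = next;
                next = atomic_load(&oldest->next);
        }
        if (next != NULL) {
                q->oldest = next;
                return oldest;
        }
        /* "oldest" looks like the last entry; confirm no producer is
         * mid-enqueue, then recycle the stub and look again. */
        if (oldest != atomic_load(&q->newest))
                return NULL;
        mpsc_put(q, &q->stub);
        next = atomic_load(&oldest->next);
        if (next != NULL) {
                q->oldest = next;
                return oldest;
        }
        return NULL;
}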
/linux/drivers/net/wireless/st/cw1200/
queue.c
3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
12 #include "queue.h"
27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument
29 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock()
30 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock()
32 queue->queue_id); in __cw1200_queue_lock()
33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock()
37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument
39 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock()
40 BUG_ON(!queue in __cw1200_queue_unlock()
89 __cw1200_queue_gc(struct cw1200_queue * queue,struct list_head * head,bool unlock) __cw1200_queue_gc() argument
135 struct cw1200_queue *queue = cw1200_queue_gc() local
164 cw1200_queue_init(struct cw1200_queue * queue,struct cw1200_queue_stats * stats,u8 queue_id,size_t capacity,unsigned long ttl) cw1200_queue_init() argument
202 cw1200_queue_clear(struct cw1200_queue * queue) cw1200_queue_clear() argument
244 cw1200_queue_deinit(struct cw1200_queue * queue) cw1200_queue_deinit() argument
256 cw1200_queue_get_num_queued(struct cw1200_queue * queue,u32 link_id_map) cw1200_queue_get_num_queued() argument
280 cw1200_queue_put(struct cw1200_queue * queue,struct sk_buff * skb,struct cw1200_txpriv * txpriv) cw1200_queue_put() argument
331 cw1200_queue_get(struct cw1200_queue * queue,u32 link_id_map,struct wsm_tx ** tx,struct ieee80211_tx_info ** tx_info,const struct cw1200_txpriv ** txpriv) cw1200_queue_get() argument
372 cw1200_queue_requeue(struct cw1200_queue * queue,u32 packet_id) cw1200_queue_requeue() argument
414 cw1200_queue_remove(struct cw1200_queue * queue,u32 packet_id) cw1200_queue_remove() argument
465 cw1200_queue_get_skb(struct cw1200_queue * queue,u32 packet_id,struct sk_buff ** skb,const struct cw1200_txpriv ** txpriv) cw1200_queue_get_skb() argument
495 cw1200_queue_lock(struct cw1200_queue * queue) cw1200_queue_lock() argument
502 cw1200_queue_unlock(struct cw1200_queue * queue) cw1200_queue_unlock() argument
509 cw1200_queue_get_xmit_timestamp(struct cw1200_queue * queue,unsigned long * timestamp,u32 pending_frame_id) cw1200_queue_get_xmit_timestamp() argument
[all...]
/linux/drivers/net/wireless/broadcom/b43legacy/
pio.c
22 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument
24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start()
28 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument
31 if (queue->need_workarounds) { in tx_octet()
32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
63 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument
71 if (queue in tx_data()
89 tx_complete(struct b43legacy_pioqueue * queue,struct sk_buff * skb) tx_complete() argument
103 generate_cookie(struct b43legacy_pioqueue * queue,struct b43legacy_pio_txpacket * packet) generate_cookie() argument
141 struct b43legacy_pioqueue *queue = NULL; parse_cookie() local
172 pio_tx_write_fragment(struct b43legacy_pioqueue * queue,struct sk_buff * skb,struct b43legacy_pio_txpacket * packet,size_t txhdr_size) pio_tx_write_fragment() argument
205 struct b43legacy_pioqueue *queue = packet->queue; free_txpacket() local
219 struct b43legacy_pioqueue *queue = packet->queue; pio_tx_packet() local
269 struct b43legacy_pioqueue *queue = from_tasklet(queue, t, txtask); tx_tasklet() local
299 setup_txqueues(struct b43legacy_pioqueue * queue) setup_txqueues() argument
319 struct b43legacy_pioqueue *queue; b43legacy_setup_pioqueue() local
367 cancel_transfers(struct b43legacy_pioqueue * queue) cancel_transfers() argument
379 b43legacy_destroy_pioqueue(struct b43legacy_pioqueue * queue) b43legacy_destroy_pioqueue() argument
409 struct b43legacy_pioqueue *queue; b43legacy_pio_init() local
455 struct b43legacy_pioqueue *queue = dev->pio.queue1; b43legacy_pio_tx() local
477 struct b43legacy_pioqueue *queue; b43legacy_pio_handle_txstatus() local
535 pio_rx_error(struct b43legacy_pioqueue * queue,int clear_buffers,const char * error) pio_rx_error() argument
553 b43legacy_pio_rx(struct b43legacy_pioqueue * queue) b43legacy_pio_rx() argument
634 b43legacy_pio_tx_suspend(struct b43legacy_pioqueue * queue) b43legacy_pio_tx_suspend() argument
642 b43legacy_pio_tx_resume(struct b43legacy_pioqueue * queue) b43legacy_pio_tx_resume() argument
[all...]
/linux/drivers/iio/buffer/
industrialio-buffer-dma.c
37 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
38 * incoming queue are waiting for the DMA controller to pick them up and fill
39 * them with data. Block on the outgoing queue have been filled with data and
55 * incoming or outgoing queue the block will be freed.
101 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local
106 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
109 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release()
112 iio_buffer_put(&queue in iio_buffer_block_release()
175 iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue * queue,size_t size,bool fileio) iio_dma_buffer_alloc_block() argument
213 iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue * queue) iio_dma_buffer_queue_wake() argument
234 struct iio_dma_buffer_queue *queue = block->queue; iio_dma_buffer_block_done() local
263 iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue * queue,struct list_head * list) iio_dma_buffer_block_list_abort() argument
308 iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue * queue) iio_dma_buffer_can_use_fileio() argument
327 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_request_update() local
425 iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue * queue) iio_dma_buffer_fileio_free() argument
448 iio_dma_buffer_submit_block(struct iio_dma_buffer_queue * queue,struct iio_dma_buffer_block * block) iio_dma_buffer_submit_block() argument
496 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_enable() local
522 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_disable() local
535 iio_dma_buffer_enqueue(struct iio_dma_buffer_queue * queue,struct iio_dma_buffer_block * block) iio_dma_buffer_enqueue() argument
549 iio_dma_buffer_dequeue(struct iio_dma_buffer_queue * queue) iio_dma_buffer_dequeue() argument
574 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_io() local
668 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); iio_dma_buffer_usage() local
705 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_attach_dmabuf() local
738 struct iio_dma_buffer_queue *queue = block->queue; iio_dma_can_enqueue_block() local
763 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_enqueue_dmabuf() local
791 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_lock_queue() local
799 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); iio_dma_buffer_unlock_queue() local
851 iio_dma_buffer_init(struct iio_dma_buffer_queue * queue,struct device * dev,const struct iio_dma_buffer_ops * ops) iio_dma_buffer_init() argument
876 iio_dma_buffer_exit(struct iio_dma_buffer_queue * queue) iio_dma_buffer_exit() argument
895 iio_dma_buffer_release(struct iio_dma_buffer_queue * queue) iio_dma_buffer_release() argument
[all...]
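
The industrialio-buffer-dma.c comment excerpted above describes a block lifecycle built on two queues: blocks wait on an incoming queue for the DMA controller to fill them, move to an outgoing queue once filled, and are freed once they sit on neither. The following is a small userspace sketch of that state machine; the names (dma_block, buffer_queue) are hypothetical and the "DMA done" step is an ordinary function call rather than an interrupt.

/* Two-queue block lifecycle sketch: "incoming" holds empty blocks
 * waiting to be filled by DMA, "outgoing" holds filled blocks waiting
 * for userspace. Illustrative only; not the IIO DMA buffer API. */
#include <pthread.h>
#include <stddef.h>

enum block_state { BLOCK_DEQUEUED, BLOCK_QUEUED, BLOCK_DONE };

struct dma_block {
        struct dma_block *next;
        enum block_state state;
        size_t bytes_used;
};

struct block_list {
        struct dma_block *head, *tail;
};

struct buffer_queue {
        pthread_mutex_t lock;
        struct block_list incoming;
        struct block_list outgoing;
};

static void block_list_push(struct block_list *l, struct dma_block *b)
{
        b->next = NULL;
        if (l->tail)
                l->tail->next = b;
        else
                l->head = b;
        l->tail = b;
}

static struct dma_block *block_list_pop(struct block_list *l)
{
        struct dma_block *b = l->head;

        if (b) {
                l->head = b->next;
                if (!l->head)
                        l->tail = NULL;
        }
        return b;
}

/* Hand an empty block to the "hardware": it now waits on incoming. */
static void submit_block(struct buffer_queue *q, struct dma_block *b)
{
        pthread_mutex_lock(&q->lock);
        b->state = BLOCK_QUEUED;
        block_list_push(&q->incoming, b);
        pthread_mutex_unlock(&q->lock);
}

/* "DMA complete": the oldest incoming block becomes a filled outgoing block. */
static void block_done(struct buffer_queue *q, size_t bytes)
{
        struct dma_block *b;

        pthread_mutex_lock(&q->lock);
        b = block_list_pop(&q->incoming);
        if (b) {
                b->state = BLOCK_DONE;
                b->bytes_used = bytes;
                block_list_push(&q->outgoing, b);
        }
        pthread_mutex_unlock(&q->lock);
}

/* Read path: detach a filled block; once the caller is done it sits on
 * neither list and can be resubmitted or freed. */
static struct dma_block *dequeue_done_block(struct buffer_queue *q)
{
        struct dma_block *b;

        pthread_mutex_lock(&q->lock);
        b = block_list_pop(&q->outgoing);
        if (b)
                b->state = BLOCK_DEQUEUED;
        pthread_mutex_unlock(&q->lock);
        return b;
}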
/linux/drivers/net/xen-netback/
rx.c
42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, in xenvif_update_needed_slots() argument
55 WRITE_ONCE(queue->rx_slots_needed, needed); in xenvif_update_needed_slots()
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
63 needed = READ_ONCE(queue->rx_slots_needed); in xenvif_rx_ring_slots_available()
68 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
69 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
74 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
80 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
90 spin_lock_irqsave(&queue in xenvif_rx_queue_tail()
111 xenvif_rx_dequeue(struct xenvif_queue * queue) xenvif_rx_dequeue() argument
135 xenvif_rx_queue_purge(struct xenvif_queue * queue) xenvif_rx_queue_purge() argument
143 xenvif_rx_queue_drop_expired(struct xenvif_queue * queue) xenvif_rx_queue_drop_expired() argument
159 xenvif_rx_copy_flush(struct xenvif_queue * queue) xenvif_rx_copy_flush() argument
193 xenvif_rx_copy_add(struct xenvif_queue * queue,struct xen_netif_rx_request * req,unsigned int offset,void * data,size_t len) xenvif_rx_copy_add() argument
252 xenvif_rx_next_skb(struct xenvif_queue * queue,struct xenvif_pkt_state * pkt) xenvif_rx_next_skb() argument
328 xenvif_rx_complete(struct xenvif_queue * queue,struct xenvif_pkt_state * pkt) xenvif_rx_complete() argument
355 xenvif_rx_next_chunk(struct xenvif_queue * queue,struct xenvif_pkt_state * pkt,unsigned int offset,void ** data,size_t * len) xenvif_rx_next_chunk() argument
393 xenvif_rx_data_slot(struct xenvif_queue * queue,struct xenvif_pkt_state * pkt,struct xen_netif_rx_request * req,struct xen_netif_rx_response * rsp) xenvif_rx_data_slot() argument
437 xenvif_rx_extra_slot(struct xenvif_queue * queue,struct xenvif_pkt_state * pkt,struct xen_netif_rx_request * req,struct xen_netif_rx_response * rsp) xenvif_rx_extra_slot() argument
461 xenvif_rx_skb(struct xenvif_queue * queue) xenvif_rx_skb() argument
491 xenvif_rx_action(struct xenvif_queue * queue) xenvif_rx_action() argument
510 xenvif_rx_queue_slots(const struct xenvif_queue * queue) xenvif_rx_queue_slots() argument
520 xenvif_rx_queue_stalled(const struct xenvif_queue * queue) xenvif_rx_queue_stalled() argument
530 xenvif_rx_queue_ready(struct xenvif_queue * queue) xenvif_rx_queue_ready() argument
537 xenvif_have_rx_work(struct xenvif_queue * queue,bool test_kthread) xenvif_have_rx_work() argument
547 xenvif_rx_queue_timeout(struct xenvif_queue * queue) xenvif_rx_queue_timeout() argument
570 xenvif_wait_for_rx_work(struct xenvif_queue * queue) xenvif_wait_for_rx_work() argument
595 xenvif_queue_carrier_off(struct xenvif_queue * queue) xenvif_queue_carrier_off() argument
610 xenvif_queue_carrier_on(struct xenvif_queue * queue) xenvif_queue_carrier_on() argument
628 struct xenvif_queue *queue = data; xenvif_kthread_guest_rx() local
[all...]
netback.c
59 /* The time that packets can stay on the guest Rx internal queue
107 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
110 static void make_tx_response(struct xenvif_queue *queue,
115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
117 static inline int tx_work_todo(struct xenvif_queue *queue);
119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
166 xenvif_napi_schedule_or_enable_events(struct xenvif_queue * queue) xenvif_napi_schedule_or_enable_events() argument
180 tx_add_credit(struct xenvif_queue * queue) tx_add_credit() argument
201 struct xenvif_queue *queue = timer_container_of(queue, t, xenvif_tx_credit_callback() local
207 xenvif_tx_err(struct xenvif_queue * queue,struct xen_netif_tx_request * txp,unsigned int extra_count,RING_IDX end) xenvif_tx_err() argument
232 xenvif_count_requests(struct xenvif_queue * queue,struct xen_netif_tx_request * first,unsigned int extra_count,struct xen_netif_tx_request * txp,int work_to_do) xenvif_count_requests() argument
341 xenvif_tx_create_map_op(struct xenvif_queue * queue,u16 pending_idx,struct xen_netif_tx_request * txp,unsigned int extra_count,struct gnttab_map_grant_ref * mop) xenvif_tx_create_map_op() argument
376 xenvif_get_requests(struct xenvif_queue * queue,struct sk_buff * skb,struct xen_netif_tx_request * first,struct xen_netif_tx_request * txfrags,unsigned * copy_ops,unsigned * map_ops,unsigned int frag_overflow,struct sk_buff * nskb,unsigned int extra_count,unsigned int data_len) xenvif_get_requests() argument
524 xenvif_grant_handle_set(struct xenvif_queue * queue,u16 pending_idx,grant_handle_t handle) xenvif_grant_handle_set() argument
538 xenvif_grant_handle_reset(struct xenvif_queue * queue,u16 pending_idx) xenvif_grant_handle_reset() argument
551 xenvif_tx_check_gop(struct xenvif_queue * queue,struct sk_buff * skb,struct gnttab_map_grant_ref ** gopp_map,struct gnttab_copy ** gopp_copy) xenvif_tx_check_gop() argument
688 xenvif_fill_frags(struct xenvif_queue * queue,struct sk_buff * skb) xenvif_fill_frags() argument
726 xenvif_get_extras(struct xenvif_queue * queue,struct xen_netif_extra_info * extras,unsigned int * extra_count,int work_to_do) xenvif_get_extras() argument
789 checksum_setup(struct xenvif_queue * queue,struct sk_buff * skb) checksum_setup() argument
811 tx_credit_exceeded(struct xenvif_queue * queue,unsigned size) tx_credit_exceeded() argument
916 xenvif_tx_build_gops(struct xenvif_queue * queue,int budget,unsigned * copy_ops,unsigned * map_ops) xenvif_tx_build_gops() argument
1112 xenvif_handle_frag_list(struct xenvif_queue * queue,struct sk_buff * skb) xenvif_handle_frag_list() argument
1172 xenvif_tx_submit(struct xenvif_queue * queue) xenvif_tx_submit() argument
1290 struct xenvif_queue *queue = ubuf_to_queue(ubuf); xenvif_zerocopy_callback() local
1322 xenvif_tx_dealloc_action(struct xenvif_queue * queue) xenvif_tx_dealloc_action() argument
1390 xenvif_tx_action(struct xenvif_queue * queue,int budget) xenvif_tx_action() argument
1425 _make_tx_response(struct xenvif_queue * queue,const struct xen_netif_tx_request * txp,unsigned int extra_count,s8 status) _make_tx_response() argument
1443 push_tx_responses(struct xenvif_queue * queue) push_tx_responses() argument
1452 xenvif_idx_release(struct xenvif_queue * queue,u16 pending_idx,s8 status) xenvif_idx_release() argument
1478 make_tx_response(struct xenvif_queue * queue,const struct xen_netif_tx_request * txp,unsigned int extra_count,s8 status) make_tx_response() argument
1493 xenvif_idx_unmap(struct xenvif_queue * queue,u16 pending_idx) xenvif_idx_unmap() argument
1518 tx_work_todo(struct xenvif_queue * queue) tx_work_todo() argument
1526 tx_dealloc_work_todo(struct xenvif_queue * queue) tx_dealloc_work_todo() argument
1531 xenvif_unmap_frontend_data_rings(struct xenvif_queue * queue) xenvif_unmap_frontend_data_rings() argument
1541 xenvif_map_frontend_data_rings(struct xenvif_queue * queue,grant_ref_t tx_ring_ref,grant_ref_t rx_ring_ref) xenvif_map_frontend_data_rings() argument
1588 xenvif_dealloc_kthread_should_stop(struct xenvif_queue * queue) xenvif_dealloc_kthread_should_stop() argument
1599 struct xenvif_queue *queue = data; xenvif_dealloc_kthread() local
[all...]
interface.c
44 /* Number of bytes allowed on the internal guest Rx queue. */
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
56 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
61 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
67 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete()
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt()
83 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt()
89 struct xenvif_queue *queue in xenvif_tx_interrupt() local
105 struct xenvif_queue *queue = xenvif_poll() local
132 xenvif_handle_rx_interrupt(struct xenvif_queue * queue) xenvif_handle_rx_interrupt() argument
144 struct xenvif_queue *queue = dev_id; xenvif_rx_interrupt() local
160 struct xenvif_queue *queue = dev_id; xenvif_interrupt() local
208 struct xenvif_queue *queue = NULL; xenvif_start_xmit() local
274 struct xenvif_queue *queue = NULL; xenvif_get_stats() local
306 struct xenvif_queue *queue = NULL; xenvif_up() local
322 struct xenvif_queue *queue = NULL; xenvif_down() local
561 xenvif_init_queue(struct xenvif_queue * queue) xenvif_init_queue() argument
671 xenvif_disconnect_queue(struct xenvif_queue * queue) xenvif_disconnect_queue() argument
703 xenvif_connect_data(struct xenvif_queue * queue,unsigned long tx_ring_ref,unsigned long rx_ring_ref,unsigned int tx_evtchn,unsigned int rx_evtchn) xenvif_connect_data() argument
804 struct xenvif_queue *queue = NULL; xenvif_disconnect_data() local
838 xenvif_deinit_queue(struct xenvif_queue * queue) xenvif_deinit_queue() argument
[all...]
/linux/drivers/media/usb/uvc/
uvc_queue.c
24 * Video buffers queue management.
30 * the videobuf2 queue operations by serializing calls to videobuf2 and a
31 * spinlock to protect the IRQ queue that holds the buffers to be processed by
43 * This function must be called with the queue spinlock held.
45 static void __uvc_queue_return_buffers(struct uvc_video_queue *queue, in __uvc_queue_return_buffers() argument
52 lockdep_assert_held(&queue->irqlock); in __uvc_queue_return_buffers()
54 while (!list_empty(&queue->irqqueue)) { in __uvc_queue_return_buffers()
55 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue, in __uvc_queue_return_buffers()
57 queue); in __uvc_queue_return_buffers()
58 list_del(&buf->queue); in __uvc_queue_return_buffers()
64 uvc_queue_return_buffers(struct uvc_video_queue * queue,enum uvc_buffer_state state) uvc_queue_return_buffers() argument
80 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_queue_setup() local
111 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_prepare() local
139 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_queue() local
162 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_finish() local
172 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_start_streaming_video() local
197 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_stop_streaming_video() local
211 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_stop_streaming_meta() local
239 uvc_queue_init(struct uvc_video_queue * queue,enum v4l2_buf_type type) uvc_queue_init() argument
289 uvc_queue_cancel(struct uvc_video_queue * queue,int disconnect) uvc_queue_cancel() argument
314 __uvc_queue_get_current_buffer(struct uvc_video_queue * queue) __uvc_queue_get_current_buffer() argument
322 uvc_queue_get_current_buffer(struct uvc_video_queue * queue) uvc_queue_get_current_buffer() argument
341 uvc_queue_buffer_requeue(struct uvc_video_queue * queue,struct uvc_buffer * buf) uvc_queue_buffer_requeue() argument
356 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_queue_buffer_complete() local
383 uvc_queue_next_buffer(struct uvc_video_queue * queue,struct uvc_buffer * buf) uvc_queue_next_buffer() argument
[all...]
/linux/drivers/md/dm-vdo/indexer/
funnel-requestqueue.c
12 #include "funnel-queue.h"
18 * This queue will attempt to handle requests in reasonably sized batches instead of reacting
22 * If the wait time becomes long enough, the queue will become dormant and must be explicitly
24 * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
28 * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
31 * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
35 * the queue to awaken immediately.
50 /* Wait queue for synchronizing producers and consumer */
68 static inline struct uds_request *poll_queues(struct uds_request_queue *queue) in poll_queues() argument
72 entry = vdo_funnel_queue_poll(queue in poll_queues()
83 are_queues_idle(struct uds_request_queue * queue) are_queues_idle() argument
94 dequeue_request(struct uds_request_queue * queue,struct uds_request ** request_ptr,bool * waited_ptr) dequeue_request() argument
115 wait_for_request(struct uds_request_queue * queue,bool dormant,unsigned long timeout,struct uds_request ** request,bool * waited) wait_for_request() argument
133 struct uds_request_queue *queue = arg; request_queue_worker() local
199 struct uds_request_queue *queue; uds_make_request_queue() local
234 wake_up_worker(struct uds_request_queue * queue) wake_up_worker() argument
240 uds_request_queue_enqueue(struct uds_request_queue * queue,struct uds_request * request) uds_request_queue_enqueue() argument
257 uds_request_queue_finish(struct uds_request_queue * queue) uds_request_queue_finish() argument
[all...]
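
The funnel-requestqueue.c excerpt above explains the consumer strategy: handle requests in reasonably sized batches, and once the queue has been empty long enough, go dormant so that producers must explicitly wake the worker after enqueuing. The kernel does this locklessly on top of the funnel queue; the sketch below is a deliberately simplified, lock-based userspace version of the same dormant/wake handshake, with invented names.

/* Simplified dormant-consumer sketch: the worker polls for a while,
 * then marks itself dormant and sleeps; producers enqueue first and
 * only signal when they observe the dormant flag. Lock-based for
 * clarity; the kernel version is lock-free and relies on barriers. */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

struct request {
        struct request *next;
};

struct request_queue {
        pthread_mutex_t lock;
        pthread_cond_t wake;
        struct request *head, *tail;
        bool dormant;
};

static void rq_enqueue(struct request_queue *q, struct request *r)
{
        bool need_wake;

        pthread_mutex_lock(&q->lock);
        r->next = NULL;
        if (q->tail)
                q->tail->next = r;
        else
                q->head = r;
        q->tail = r;
        need_wake = q->dormant;          /* only wake a sleeping worker */
        pthread_mutex_unlock(&q->lock);
        if (need_wake)
                pthread_cond_signal(&q->wake);
}

static struct request *rq_dequeue_locked(struct request_queue *q)
{
        struct request *r = q->head;

        if (r) {
                q->head = r->next;
                if (!q->head)
                        q->tail = NULL;
        }
        return r;
}

/* Worker: keep batching while requests arrive; after "idle_spins" empty
 * polls, go dormant until a producer signals. */
static struct request *rq_wait_for_request(struct request_queue *q,
                                           int idle_spins)
{
        struct request *r;
        int spins = 0;

        pthread_mutex_lock(&q->lock);
        while ((r = rq_dequeue_locked(q)) == NULL) {
                if (++spins < idle_spins) {
                        pthread_mutex_unlock(&q->lock);
                        sched_yield();   /* cheap poll between batches */
                        pthread_mutex_lock(&q->lock);
                        continue;
                }
                q->dormant = true;
                pthread_cond_wait(&q->wake, &q->lock);
                q->dormant = false;
                spins = 0;
        }
        pthread_mutex_unlock(&q->lock);
        return r;
}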
/linux/drivers/misc/genwqe/
card_ddcb.c
14 * Device Driver Control Block (DDCB) queue support. Definition of
15 * interrupt handlers for queue support as well as triggering the
40 * Situation (1): Empty queue
82 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument
84 return queue->ddcb_next == queue->ddcb_act; in queue_empty()
87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument
89 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs()
90 return queue in queue_enqueued_ddcbs()
95 queue_free_ddcbs(struct ddcb_queue * queue) queue_free_ddcbs() argument
163 print_ddcb_info(struct genwqe_dev * cd,struct ddcb_queue * queue) print_ddcb_info() argument
265 enqueue_ddcb(struct genwqe_dev * cd,struct ddcb_queue * queue,struct ddcb * pddcb,int ddcb_no) enqueue_ddcb() argument
332 struct ddcb_queue *queue = req->queue; copy_ddcb_results() local
366 genwqe_check_ddcb_queue(struct genwqe_dev * cd,struct ddcb_queue * queue) genwqe_check_ddcb_queue() argument
481 struct ddcb_queue *queue; __genwqe_wait_ddcb() local
506 struct ddcb_queue *queue = req->queue; __genwqe_wait_ddcb() local
568 get_next_ddcb(struct genwqe_dev * cd,struct ddcb_queue * queue,int * num) get_next_ddcb() argument
625 struct ddcb_queue *queue = req->queue; __genwqe_purge_ddcb() local
759 struct ddcb_queue *queue; __genwqe_enqueue_ddcb() local
981 struct ddcb_queue *queue = &cd->queue; genwqe_next_ddcb_ready() local
1012 struct ddcb_queue *queue = &cd->queue; genwqe_ddcbs_in_flight() local
1021 setup_ddcb_queue(struct genwqe_dev * cd,struct ddcb_queue * queue) setup_ddcb_queue() argument
1098 ddcb_queue_initialized(struct ddcb_queue * queue) ddcb_queue_initialized() argument
1103 free_ddcb_queue(struct genwqe_dev * cd,struct ddcb_queue * queue) free_ddcb_queue() argument
1231 struct ddcb_queue *queue; genwqe_setup_service_layer() local
1320 struct ddcb_queue *queue = &cd->queue; queue_wake_up_all() local
1347 struct ddcb_queue *queue = &cd->queue; genwqe_finish_queue() local
[all...]
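
queue_empty() and queue_enqueued_ddcbs() above are the usual ring-index bookkeeping: the queue is empty when the enqueue index (ddcb_next) equals the active index (ddcb_act), and the occupancy count has to allow for wraparound. Since the listing truncates the arithmetic, the generic version below is an assumption about the conventional form, not a restatement of the genwqe code.

/* Generic ring-occupancy arithmetic for indices in [0, max). Assumes
 * the common convention that one slot is kept free so that next == act
 * always means "empty"; this is an assumption, not confirmed genwqe
 * behaviour. */
static int ring_empty(unsigned int next, unsigned int act)
{
        return next == act;
}

static unsigned int ring_enqueued(unsigned int next, unsigned int act,
                                  unsigned int max)
{
        if (next >= act)
                return next - act;
        return max - (act - next);
}

static unsigned int ring_free(unsigned int next, unsigned int act,
                              unsigned int max)
{
        /* minus one: a completely full ring would otherwise look empty */
        return max - ring_enqueued(next, act, max) - 1;
}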
/linux/drivers/infiniband/hw/mana/
shadow_queue.h
33 /* queue size in wqes */
41 static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride) in create_shadow_queue() argument
43 queue->buffer = kvmalloc_array(length, stride, GFP_KERNEL); in create_shadow_queue()
44 if (!queue->buffer) in create_shadow_queue()
47 queue->length = length; in create_shadow_queue()
48 queue->stride = stride; in create_shadow_queue()
53 static inline void destroy_shadow_queue(struct shadow_queue *queue) in destroy_shadow_queue() argument
55 kvfree(queue->buffer); in destroy_shadow_queue()
58 static inline bool shadow_queue_full(struct shadow_queue *queue) in shadow_queue_full() argument
60 return (queue in shadow_queue_full()
63 shadow_queue_empty(struct shadow_queue * queue) shadow_queue_empty() argument
69 shadow_queue_get_element(const struct shadow_queue * queue,u64 unmasked_index) shadow_queue_get_element() argument
77 shadow_queue_producer_entry(struct shadow_queue * queue) shadow_queue_producer_entry() argument
83 shadow_queue_get_next_to_consume(const struct shadow_queue * queue) shadow_queue_get_next_to_consume() argument
92 shadow_queue_get_next_to_complete(struct shadow_queue * queue) shadow_queue_get_next_to_complete() argument
100 shadow_queue_advance_producer(struct shadow_queue * queue) shadow_queue_advance_producer() argument
105 shadow_queue_advance_consumer(struct shadow_queue * queue) shadow_queue_advance_consumer() argument
110 shadow_queue_advance_next_to_complete(struct shadow_queue * queue) shadow_queue_advance_next_to_complete() argument
[all...]
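
The shadow_queue.h helpers above follow a common pattern: a flat buffer of length x stride bytes addressed by monotonically increasing ("unmasked") producer/consumer counters, with the slot recovered by reducing a counter modulo the length. Below is a self-contained userspace sketch of that pattern; the full/empty conditions shown are the conventional ones and are an assumption, not a statement about the mana driver.

/* Unmasked-index ring sketch: counters only ever increase, and a slot
 * is found by reducing a counter modulo the queue length.
 * Illustrative; not the mana shadow_queue implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct shadow_ring {
        void *buffer;
        uint32_t length;   /* number of entries */
        uint32_t stride;   /* bytes per entry */
        uint64_t prod_idx; /* next slot to produce into */
        uint64_t cons_idx; /* next slot to consume */
};

static int shadow_ring_create(struct shadow_ring *q, uint32_t length,
                              uint32_t stride)
{
        q->buffer = calloc(length, stride);
        if (!q->buffer)
                return -1;
        q->length = length;
        q->stride = stride;
        q->prod_idx = q->cons_idx = 0;
        return 0;
}

static bool shadow_ring_full(const struct shadow_ring *q)
{
        return q->prod_idx - q->cons_idx >= q->length;
}

static bool shadow_ring_empty(const struct shadow_ring *q)
{
        return q->prod_idx == q->cons_idx;
}

/* Map an unmasked (ever-increasing) index to its slot in the buffer. */
static void *shadow_ring_element(const struct shadow_ring *q,
                                 uint64_t unmasked_index)
{
        uint64_t slot = unmasked_index % q->length;

        return (char *)q->buffer + slot * q->stride;
}

static void *shadow_ring_producer_entry(struct shadow_ring *q)
{
        return shadow_ring_element(q, q->prod_idx);
}

static void shadow_ring_advance_producer(struct shadow_ring *q)
{
        q->prod_idx++;
}

static void shadow_ring_advance_consumer(struct shadow_ring *q)
{
        q->cons_idx++;
}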
/linux/drivers/net/
xen-netfront.c
94 /* IRQ name is queue name with "-tx" or "-rx" appended */
166 /* Multi-queue support */
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_get_rx_ref()
248 struct netfront_queue *queue = timer_container_of(queue, in rx_refill_timeout() local
253 netfront_tx_slot_available(struct netfront_queue * queue) netfront_tx_slot_available() argument
259 xennet_maybe_wake_tx(struct netfront_queue * queue) xennet_maybe_wake_tx() argument
271 xennet_alloc_one_rx_buffer(struct netfront_queue * queue) xennet_alloc_one_rx_buffer() argument
299 xennet_alloc_rx_buffers(struct netfront_queue * queue) xennet_alloc_rx_buffers() argument
366 struct netfront_queue *queue = NULL; xennet_open() local
390 xennet_tx_buf_gc(struct netfront_queue * queue) xennet_tx_buf_gc() argument
466 struct netfront_queue *queue; global() member
483 struct netfront_queue *queue = info->queue; xennet_tx_setup_grant() local
604 xennet_mark_tx_pending(struct netfront_queue * queue) xennet_mark_tx_pending() argument
614 xennet_xdp_xmit_one(struct net_device * dev,struct netfront_queue * queue,struct xdp_frame * xdpf) xennet_xdp_xmit_one() argument
649 struct netfront_queue *queue = NULL; xennet_xdp_xmit() local
718 struct netfront_queue *queue = NULL; xennet_start_xmit() local
868 struct netfront_queue *queue; xennet_close() local
885 struct netfront_queue *queue = &info->queues[i]; xennet_destroy_queues() local
902 xennet_set_rx_rsp_cons(struct netfront_queue * queue,RING_IDX val) xennet_set_rx_rsp_cons() argument
912 xennet_move_rx_slot(struct netfront_queue * queue,struct sk_buff * skb,grant_ref_t ref) xennet_move_rx_slot() argument
925 xennet_get_extras(struct netfront_queue * queue,struct xen_netif_extra_info * extras,RING_IDX rp) xennet_get_extras() argument
967 xennet_run_xdp(struct netfront_queue * queue,struct page * pdata,struct xen_netif_rx_response * rx,struct bpf_prog * prog,struct xdp_buff * xdp,bool * need_xdp_flush) xennet_run_xdp() argument
1021 xennet_get_responses(struct netfront_queue * queue,struct netfront_rx_info * rinfo,RING_IDX rp,struct sk_buff_head * list,bool * need_xdp_flush) xennet_get_responses() argument
1164 xennet_fill_frags(struct netfront_queue * queue,struct sk_buff * skb,struct sk_buff_head * list) xennet_fill_frags() argument
1227 handle_incoming_queue(struct netfront_queue * queue,struct sk_buff_head * rxq) handle_incoming_queue() argument
1265 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); xennet_poll() local
1423 xennet_release_tx_bufs(struct netfront_queue * queue) xennet_release_tx_bufs() argument
1445 xennet_release_rx_bufs(struct netfront_queue * queue) xennet_release_rx_bufs() argument
1514 xennet_handle_tx(struct netfront_queue * queue,unsigned int * eoi) xennet_handle_tx() argument
1539 xennet_handle_rx(struct netfront_queue * queue,unsigned int * eoi) xennet_handle_rx() argument
1823 struct netfront_queue *queue = &info->queues[i]; xennet_disconnect_backend() local
1905 setup_netfront_single(struct netfront_queue * queue) setup_netfront_single() argument
1931 setup_netfront_split(struct netfront_queue * queue) setup_netfront_split() argument
1976 setup_netfront(struct xenbus_device * dev,struct netfront_queue * queue,unsigned int feature_split_evtchn) setup_netfront() argument
2026 xennet_init_queue(struct netfront_queue * queue) xennet_init_queue() argument
2082 write_queue_xenstore_keys(struct netfront_queue * queue,struct xenbus_transaction * xbt,int write_hierarchical) write_queue_xenstore_keys() argument
2166 xennet_create_page_pool(struct netfront_queue * queue) xennet_create_page_pool() argument
2221 struct netfront_queue *queue = &info->queues[i]; xennet_create_queues() local
2266 struct netfront_queue *queue = NULL; talk_to_netback() local
2430 struct netfront_queue *queue = NULL; xennet_connect() local
[all...]
/linux/fs/fuse/
dev_uring.c
50 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument
52 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
55 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
59 * Allow one bg request per queue, ignoring global fc limits. in fuse_uring_flush_bg()
60 * This prevents a single queue from consuming all resources and in fuse_uring_flush_bg()
61 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
62 * limits are met but this queue has no more waiting requests. in fuse_uring_flush_bg()
65 !queue->active_background) && in fuse_uring_flush_bg()
66 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
69 req = list_first_entry(&queue in fuse_uring_flush_bg()
81 struct fuse_ring_queue *queue = ent->queue; fuse_uring_req_end() local
105 fuse_uring_abort_end_queue_requests(struct fuse_ring_queue * queue) fuse_uring_abort_end_queue_requests() argument
123 struct fuse_ring_queue *queue; fuse_uring_abort_end_requests() local
161 struct fuse_ring_queue *queue; fuse_uring_request_expired() local
195 struct fuse_ring_queue *queue = ring->queues[qid]; fuse_uring_destruct() local
272 struct fuse_ring_queue *queue; fuse_uring_create_queue() local
331 struct fuse_ring_queue *queue = ent->queue; fuse_uring_entry_teardown() local
361 fuse_uring_stop_list_entries(struct list_head * head,struct fuse_ring_queue * queue,enum fuse_ring_req_state exp_state) fuse_uring_stop_list_entries() argument
390 fuse_uring_teardown_entries(struct fuse_ring_queue * queue) fuse_uring_teardown_entries() argument
407 struct fuse_ring_queue *queue = ring->queues[qid]; fuse_uring_log_ent_state() local
438 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); fuse_uring_async_stop_queues() local
473 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); fuse_uring_stop_queues() local
502 struct fuse_ring_queue *queue; fuse_uring_cancel() local
665 struct fuse_ring_queue *queue = ent->queue; fuse_uring_copy_to_ring() local
721 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send_next_to_ring() local
744 fuse_uring_ent_avail(struct fuse_ring_ent * ent,struct fuse_ring_queue * queue) fuse_uring_ent_avail() argument
755 struct fuse_ring_queue *queue = ent->queue; fuse_uring_add_to_pq() local
770 struct fuse_ring_queue *queue = ent->queue; fuse_uring_add_req_to_ring_ent() local
792 struct fuse_ring_queue *queue = ent->queue; fuse_uring_ent_assign_req() local
839 fuse_uring_next_fuse_req(struct fuse_ring_ent * ent,struct fuse_ring_queue * queue,unsigned int issue_flags) fuse_uring_next_fuse_req() argument
860 struct fuse_ring_queue *queue = ent->queue; fuse_ring_ent_set_commit() local
881 struct fuse_ring_queue *queue; fuse_uring_commit_fetch() local
951 struct fuse_ring_queue *queue; is_ring_ready() local
980 struct fuse_ring_queue *queue = ent->queue; fuse_uring_do_register() local
1031 fuse_uring_create_ring_ent(struct io_uring_cmd * cmd,struct fuse_ring_queue * queue) fuse_uring_create_ring_ent() argument
1083 struct fuse_ring_queue *queue; fuse_uring_register() local
1195 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send() local
1215 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send_in_task() local
1234 struct fuse_ring_queue *queue; fuse_uring_task_to_queue() local
1262 struct fuse_ring_queue *queue; fuse_uring_queue_fuse_req() local
1306 struct fuse_ring_queue *queue; fuse_uring_queue_bq_req() local
1353 struct fuse_ring_queue *queue = req->ring_queue; fuse_uring_remove_pending_req() local
[all...]
/linux/drivers/net/wireguard/
queueing.c
25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, in wg_packet_queue_init() argument
30 memset(queue, 0, sizeof(*queue)); in wg_packet_queue_init()
31 queue->last_cpu = -1; in wg_packet_queue_init()
32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); in wg_packet_queue_init()
35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); in wg_packet_queue_init()
36 if (!queue->worker) { in wg_packet_queue_init()
37 ptr_ring_cleanup(&queue->ring, NULL); in wg_packet_queue_init()
43 void wg_packet_queue_free(struct crypt_queue *queue, boo argument
51 STUB(queue) global() argument
53 wg_prev_queue_init(struct prev_queue * queue) wg_prev_queue_init() argument
66 __wg_prev_queue_enqueue(struct prev_queue * queue,struct sk_buff * skb) __wg_prev_queue_enqueue() argument
72 wg_prev_queue_enqueue(struct prev_queue * queue,struct sk_buff * skb) wg_prev_queue_enqueue() argument
80 wg_prev_queue_dequeue(struct prev_queue * queue) wg_prev_queue_dequeue() argument
[all...]
/linux/drivers/nvme/host/
tcp.c
106 struct nvme_tcp_queue *queue; member
207 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
214 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
216 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
233 * Check if the queue is TLS encrypted
235 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
240 return queue->tls_enabled; in nvme_tcp_queue_tls()
254 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
256 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
263 nvme_tcp_hdgst_len(struct nvme_tcp_queue * queue) nvme_tcp_hdgst_len() argument
268 nvme_tcp_ddgst_len(struct nvme_tcp_queue * queue) nvme_tcp_ddgst_len() argument
383 nvme_tcp_send_all(struct nvme_tcp_queue * queue) nvme_tcp_send_all() argument
393 nvme_tcp_queue_has_pending(struct nvme_tcp_queue * queue) nvme_tcp_queue_has_pending() argument
399 nvme_tcp_queue_more(struct nvme_tcp_queue * queue) nvme_tcp_queue_more() argument
408 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_queue_request() local
429 nvme_tcp_process_req_list(struct nvme_tcp_queue * queue) nvme_tcp_process_req_list() argument
441 nvme_tcp_fetch_request(struct nvme_tcp_queue * queue) nvme_tcp_fetch_request() argument
494 nvme_tcp_verify_hdgst(struct nvme_tcp_queue * queue,void * pdu,size_t pdu_len) nvme_tcp_verify_hdgst() argument
520 nvme_tcp_check_ddgst(struct nvme_tcp_queue * queue,void * pdu) nvme_tcp_check_ddgst() argument
556 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; nvme_tcp_init_request() local
579 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; nvme_tcp_init_hctx() local
589 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_init_admin_hctx() local
596 nvme_tcp_recv_state(struct nvme_tcp_queue * queue) nvme_tcp_recv_state() argument
603 nvme_tcp_init_recv_ctx(struct nvme_tcp_queue * queue) nvme_tcp_init_recv_ctx() argument
621 nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue * queue,struct nvme_completion * cqe) nvme_tcp_process_nvme_cqe() argument
647 nvme_tcp_handle_c2h_data(struct nvme_tcp_queue * queue,struct nvme_tcp_data_pdu * pdu) nvme_tcp_handle_c2h_data() argument
681 nvme_tcp_handle_comp(struct nvme_tcp_queue * queue,struct nvme_tcp_rsp_pdu * pdu) nvme_tcp_handle_comp() argument
706 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_setup_h2c_data_pdu() local
737 nvme_tcp_handle_r2t(struct nvme_tcp_queue * queue,struct nvme_tcp_r2t_pdu * pdu) nvme_tcp_handle_r2t() argument
796 nvme_tcp_handle_c2h_term(struct nvme_tcp_queue * queue,struct nvme_tcp_term_pdu * pdu) nvme_tcp_handle_c2h_term() argument
830 nvme_tcp_recv_pdu(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_pdu() argument
910 nvme_tcp_recv_data(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_data() argument
981 nvme_tcp_recv_ddgst(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_ddgst() argument
1029 struct nvme_tcp_queue *queue = desc->arg.data; nvme_tcp_recv_skb() local
1064 struct nvme_tcp_queue *queue; nvme_tcp_data_ready() local
1078 struct nvme_tcp_queue *queue; nvme_tcp_write_space() local
1091 struct nvme_tcp_queue *queue; nvme_tcp_state_change() local
1117 nvme_tcp_done_send_req(struct nvme_tcp_queue * queue) nvme_tcp_done_send_req() argument
1137 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_data() local
1200 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_cmd_pdu() local
1241 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_data_pdu() local
1275 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_ddgst() local
1306 nvme_tcp_try_send(struct nvme_tcp_queue * queue) nvme_tcp_try_send() argument
1356 nvme_tcp_try_recv(struct nvme_tcp_queue * queue) nvme_tcp_try_recv() argument
1374 struct nvme_tcp_queue *queue = nvme_tcp_io_work() local
1419 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_alloc_async_req() local
1436 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_free_queue() local
1455 nvme_tcp_init_connection(struct nvme_tcp_queue * queue) nvme_tcp_init_connection() argument
1586 nvme_tcp_admin_queue(struct nvme_tcp_queue * queue) nvme_tcp_admin_queue() argument
1591 nvme_tcp_default_queue(struct nvme_tcp_queue * queue) nvme_tcp_default_queue() argument
1600 nvme_tcp_read_queue(struct nvme_tcp_queue * queue) nvme_tcp_read_queue() argument
1611 nvme_tcp_poll_queue(struct nvme_tcp_queue * queue) nvme_tcp_poll_queue() argument
1633 nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue * queue) nvme_tcp_set_queue_io_cpu() argument
1678 struct nvme_tcp_queue *queue = data; nvme_tcp_tls_done() local
1709 nvme_tcp_start_tls(struct nvme_ctrl * nctrl,struct nvme_tcp_queue * queue,key_serial_t pskid) nvme_tcp_start_tls() argument
1765 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_alloc_queue() local
1909 nvme_tcp_restore_sock_ops(struct nvme_tcp_queue * queue) nvme_tcp_restore_sock_ops() argument
1921 __nvme_tcp_stop_queue(struct nvme_tcp_queue * queue) __nvme_tcp_stop_queue() argument
1931 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_stop_queue_nowait() local
1950 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_wait_queue() local
1972 nvme_tcp_setup_sock_ops(struct nvme_tcp_queue * queue) nvme_tcp_setup_sock_ops() argument
1991 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; nvme_tcp_start_queue() local
2573 nvme_tcp_set_sg_inline(struct nvme_tcp_queue * queue,struct nvme_command * c,u32 data_len) nvme_tcp_set_sg_inline() argument
2597 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_submit_async_event() local
2672 nvme_tcp_map_data(struct nvme_tcp_queue * queue,struct request * rq) nvme_tcp_map_data() argument
2697 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_setup_cmd_pdu() local
2748 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_commit_rqs() local
2758 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_queue_rq() local
2787 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_poll() local
2804 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; nvme_tcp_get_address() local
[all...]
rdma.c
73 struct nvme_rdma_queue *queue; member
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
166 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
173 return queue in nvme_rdma_inline_data_size()
246 nvme_rdma_wait_for_cm(struct nvme_rdma_queue * queue) nvme_rdma_wait_for_cm() argument
257 nvme_rdma_create_qp(struct nvme_rdma_queue * queue,const int factor) nvme_rdma_create_qp() argument
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; nvme_rdma_init_request() local
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; nvme_rdma_init_hctx() local
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_init_admin_hctx() local
413 nvme_rdma_free_cq(struct nvme_rdma_queue * queue) nvme_rdma_free_cq() argument
421 nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_destroy_queue_ib() argument
463 nvme_rdma_create_cq(struct ib_device * ibdev,struct nvme_rdma_queue * queue) nvme_rdma_create_cq() argument
489 nvme_rdma_create_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_create_queue_ib() argument
572 struct nvme_rdma_queue *queue; nvme_rdma_alloc_queue() local
633 __nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) __nvme_rdma_stop_queue() argument
639 nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) nvme_rdma_stop_queue() argument
650 nvme_rdma_free_queue(struct nvme_rdma_queue * queue) nvme_rdma_free_queue() argument
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; nvme_rdma_start_queue() local
1170 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_wr_error() local
1198 nvme_rdma_inv_rkey(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req) nvme_rdma_inv_rkey() argument
1231 nvme_rdma_unmap_data(struct nvme_rdma_queue * queue,struct request * rq) nvme_rdma_unmap_data() argument
1264 nvme_rdma_map_sg_inline(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_inline() argument
1290 nvme_rdma_map_sg_single(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c) nvme_rdma_map_sg_single() argument
1302 nvme_rdma_map_sg_fr(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_fr() argument
1406 nvme_rdma_map_sg_pi(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count,int pi_count) nvme_rdma_map_sg_pi() argument
1525 nvme_rdma_map_data(struct nvme_rdma_queue * queue,struct request * rq,struct nvme_command * c) nvme_rdma_map_data() argument
1591 nvme_rdma_post_send(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe,struct ib_sge * sge,u32 num_sge,struct ib_send_wr * first) nvme_rdma_post_send() argument
1622 nvme_rdma_post_recv(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe) nvme_rdma_post_recv() argument
1648 nvme_rdma_tagset(struct nvme_rdma_queue * queue) nvme_rdma_tagset() argument
1666 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_submit_async_event() local
1690 nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue * queue,struct nvme_completion * cqe,struct ib_wc * wc) nvme_rdma_process_nvme_rsp() argument
1738 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_recv_done() local
1774 nvme_rdma_conn_established(struct nvme_rdma_queue * queue) nvme_rdma_conn_established() argument
1787 nvme_rdma_conn_rejected(struct nvme_rdma_queue * queue,struct rdma_cm_event * ev) nvme_rdma_conn_rejected() argument
1813 nvme_rdma_addr_resolved(struct nvme_rdma_queue * queue) nvme_rdma_addr_resolved() argument
1838 nvme_rdma_route_resolved(struct nvme_rdma_queue * queue) nvme_rdma_route_resolved() argument
1889 struct nvme_rdma_queue *queue = cm_id->context; nvme_rdma_cm_handler() local
1947 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_timed_out() local
1956 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_timeout() local
1996 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_queue_rq() local
2075 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_poll() local
2114 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_rq() local
[all...]
/linux/drivers/gpu/drm/imagination/
pvr_queue.c
86 WARN(1, "Invalid queue type"); in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work()
123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release()
133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
147 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name()
156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
170 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name()
262 * @queue: The queue this fence belongs to.
267 * pvr_queue_fence::queue fiel
271 pvr_queue_fence_init(struct dma_fence * f,struct pvr_queue * queue,const struct dma_fence_ops * fence_ops,struct pvr_queue_fence_ctx * fence_ctx) pvr_queue_fence_init() argument
296 pvr_queue_cccb_fence_init(struct dma_fence * fence,struct pvr_queue * queue) pvr_queue_cccb_fence_init() argument
314 pvr_queue_job_fence_init(struct dma_fence * fence,struct pvr_queue * queue) pvr_queue_job_fence_init() argument
398 pvr_queue_get_job_cccb_fence(struct pvr_queue * queue,struct pvr_job * job) pvr_queue_get_job_cccb_fence() argument
451 pvr_queue_get_job_kccb_fence(struct pvr_queue * queue,struct pvr_job * job) pvr_queue_get_job_kccb_fence() argument
471 pvr_queue_get_paired_frag_job_dep(struct pvr_queue * queue,struct pvr_job * job) pvr_queue_get_paired_frag_job_dep() argument
509 struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); pvr_queue_prepare_job() local
570 pvr_queue_update_active_state_locked(struct pvr_queue * queue) pvr_queue_update_active_state_locked() argument
602 pvr_queue_update_active_state(struct pvr_queue * queue) pvr_queue_update_active_state() argument
613 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); pvr_queue_submit_job_to_cccb() local
755 struct pvr_queue *queue = container_of(job->base.sched, pvr_queue_run_job() local
766 pvr_queue_stop(struct pvr_queue * queue,struct pvr_job * bad_job) pvr_queue_stop() argument
771 pvr_queue_start(struct pvr_queue * queue) pvr_queue_start() argument
812 struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); pvr_queue_timedout_job() local
916 pvr_queue_signal_done_fences(struct pvr_queue * queue) pvr_queue_signal_done_fences() argument
946 pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue * queue) pvr_queue_check_job_waiting_for_cccb_space() argument
999 pvr_queue_process(struct pvr_queue * queue) pvr_queue_process() argument
1008 get_dm_type(struct pvr_queue * queue) get_dm_type() argument
1032 init_fw_context(struct pvr_queue * queue,void * fw_ctx_map) init_fw_context() argument
1063 pvr_queue_cleanup_fw_context(struct pvr_queue * queue) pvr_queue_cleanup_fw_context() argument
1090 struct pvr_queue *queue; pvr_queue_job_init() local
1166 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); pvr_queue_job_push() local
1178 struct pvr_queue *queue = priv; reg_state_init() local
1240 struct pvr_queue *queue; pvr_queue_create() local
1348 struct pvr_queue *queue; pvr_queue_device_pre_reset() local
1360 struct pvr_queue *queue; pvr_queue_device_post_reset() local
1379 pvr_queue_kill(struct pvr_queue * queue) pvr_queue_kill() argument
1393 pvr_queue_destroy(struct pvr_queue * queue) pvr_queue_destroy() argument
[all...]
/linux/drivers/scsi/arm/
queue.c
3 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
50 #include "queue.h"
55 * Function: void queue_initialise (Queue_t *queue)
56 * Purpose : initialise a queue
57 * Params : queue - queue to initialise
59 int queue_initialise (Queue_t *queue) in queue_initialise() argument
64 spin_lock_init(&queue->queue_lock); in queue_initialise()
65 INIT_LIST_HEAD(&queue in queue_initialise()
91 queue_free(Queue_t * queue) queue_free() argument
107 __queue_add(Queue_t * queue,struct scsi_cmnd * SCpnt,int head) __queue_add() argument
138 __queue_remove(Queue_t * queue,struct list_head * ent) __queue_remove() argument
162 queue_remove_exclude(Queue_t * queue,unsigned long * exclude) queue_remove_exclude() argument
188 queue_remove(Queue_t * queue) queue_remove() argument
210 queue_remove_tgtluntag(Queue_t * queue,int target,int lun,int tag) queue_remove_tgtluntag() argument
238 queue_remove_all_target(Queue_t * queue,int target) queue_remove_all_target() argument
261 queue_probetgtlun(Queue_t * queue,int target,int lun) queue_probetgtlun() argument
287 queue_remove_cmd(Queue_t * queue,struct scsi_cmnd * SCpnt) queue_remove_cmd() argument
[all...]
/linux/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
196 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument
200 if (q_offset >= queue->queue_length) in hw_qeit_calc()
201 q_offset -= queue->queue_length; in hw_qeit_calc()
202 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc()
206 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument
208 return hw_qeit_calc(queue, queue->current_q_offset); in hw_qeit_get()
211 static inline void hw_qeit_inc(struct hw_queue *queue) in hw_qeit_inc() argument
213 queue->current_q_offset += queue in hw_qeit_inc()
221 hw_qeit_get_inc(struct hw_queue * queue) hw_qeit_get_inc() argument
228 hw_qeit_get_inc_valid(struct hw_queue * queue) hw_qeit_get_inc_valid() argument
245 hw_qeit_get_valid(struct hw_queue * queue) hw_qeit_get_valid() argument
261 hw_qeit_reset(struct hw_queue * queue) hw_qeit_reset() argument
267 hw_qeit_eq_get_inc(struct hw_queue * queue) hw_qeit_eq_get_inc() argument
281 hw_eqit_eq_get_inc_valid(struct hw_queue * queue) hw_eqit_eq_get_inc_valid() argument
295 struct hw_queue *queue; ehea_get_next_rwqe() local
310 struct hw_queue *queue = &my_qp->hw_squeue; ehea_get_swqe() local
327 struct hw_queue *queue = &qp->hw_rqueue1; ehea_poll_rq1() local
[all...]
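The ehea helpers above walk a hardware queue whose ring buffer is split across discontiguous pages: a byte offset is wrapped at the queue length, then split into a page index and an in-page offset. A self-contained sketch of that pattern follows, with invented names (paged_queue, pq_calc, and so on) and an assumed 4 KiB page size.

/* Sketch of a paged circular hardware-queue iterator: entries live in an
 * array of fixed-size pages and a byte offset wraps at queue_length. */
#include <stdint.h>
#include <stddef.h>

#define QPAGE_SHIFT 12                      /* 4 KiB pages (assumed) */
#define QPAGE_SIZE  (1UL << QPAGE_SHIFT)
#define QPAGE_MASK  (QPAGE_SIZE - 1)

struct paged_queue {
    void   **pages;          /* array of page pointers backing the queue */
    uint64_t queue_length;   /* total size in bytes, multiple of QPAGE_SIZE */
    uint64_t entry_size;     /* size of one queue element in bytes */
    uint64_t current_offset; /* byte offset of the next element to read */
};

/* Translate a byte offset into a pointer, wrapping at queue_length. */
static void *pq_calc(struct paged_queue *q, uint64_t offset)
{
    if (offset >= q->queue_length)
        offset -= q->queue_length;
    uint8_t *page = q->pages[offset >> QPAGE_SHIFT];
    return page + (offset & QPAGE_MASK);
}

static void *pq_get(struct paged_queue *q)
{
    return pq_calc(q, q->current_offset);
}

/* Advance by one entry, wrapping back to offset 0 at the end. */
static void pq_inc(struct paged_queue *q)
{
    q->current_offset += q->entry_size;
    if (q->current_offset >= q->queue_length)
        q->current_offset = 0;
}

/* Combined get-and-advance, as used when draining completions. */
static void *pq_get_inc(struct paged_queue *q)
{
    void *entry = pq_get(q);
    pq_inc(q);
    return entry;
}

Splitting the ring across pages keeps each backing allocation small while the offset arithmetic still presents one contiguous circular index space.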
/linux/drivers/nvme/target/
H A Drdma.c52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
147 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
248 sbitmap_clear_bit(&rsp->queue in nvmet_rdma_put_rsp()
448 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue * queue) nvmet_rdma_alloc_rsps() argument
483 nvmet_rdma_free_rsps(struct nvmet_rdma_queue * queue) nvmet_rdma_free_rsps() argument
514 nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue * queue) nvmet_rdma_process_wr_wait_list() argument
663 struct nvmet_rdma_queue *queue = rsp->queue; nvmet_rdma_release_rsp() local
679 nvmet_rdma_error_comp(struct nvmet_rdma_queue * queue) nvmet_rdma_error_comp() argument
697 struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_send_done() local
750 struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_read_data_done() local
783 struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_write_data_done() local
945 struct nvmet_rdma_queue *queue = rsp->queue; nvmet_rdma_execute_command() local
967 nvmet_rdma_handle_command(struct nvmet_rdma_queue * queue,struct nvmet_rdma_rsp * cmd) nvmet_rdma_handle_command() argument
998 nvmet_rdma_recv_not_live(struct nvmet_rdma_queue * queue,struct nvmet_rdma_rsp * rsp) nvmet_rdma_recv_not_live() argument
1023 struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_recv_done() local
1263 nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue * queue) nvmet_rdma_create_queue_ib() argument
1340 nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue * queue) nvmet_rdma_destroy_queue_ib() argument
1350 nvmet_rdma_free_queue(struct nvmet_rdma_queue * queue) nvmet_rdma_free_queue() argument
1370 struct nvmet_rdma_queue *queue = nvmet_rdma_release_queue_work() local
1381 nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param * conn,struct nvmet_rdma_queue * queue) nvmet_rdma_parse_cm_connect_req() argument
1430 struct nvmet_rdma_queue *queue; nvmet_rdma_alloc_queue() local
1530 struct nvmet_rdma_queue *queue = priv; nvmet_rdma_qp_event() local
1548 nvmet_rdma_cm_accept(struct rdma_cm_id * cm_id,struct nvmet_rdma_queue * queue,struct rdma_conn_param * p) nvmet_rdma_cm_accept() argument
1575 struct nvmet_rdma_queue *queue; nvmet_rdma_queue_connect() local
1630 nvmet_rdma_queue_established(struct nvmet_rdma_queue * queue) nvmet_rdma_queue_established() argument
1657 __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue * queue) __nvmet_rdma_queue_disconnect() argument
1692 nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue * queue) nvmet_rdma_queue_disconnect() argument
1708 nvmet_rdma_queue_connect_fail(struct rdma_cm_id * cm_id,struct nvmet_rdma_queue * queue) nvmet_rdma_queue_connect_fail() argument
1737 nvmet_rdma_device_removal(struct rdma_cm_id * cm_id,struct nvmet_rdma_queue * queue) nvmet_rdma_device_removal() argument
1771 struct nvmet_rdma_queue *queue = NULL; nvmet_rdma_cm_handler() local
1822 struct nvmet_rdma_queue *queue, *n; nvmet_rdma_delete_ctrl() local
1836 struct nvmet_rdma_queue *queue, *tmp; nvmet_rdma_destroy_port_queues() local
2020 struct nvmet_rdma_queue *queue = nvmet_rdma_host_port_addr() local
2058 struct nvmet_rdma_queue *queue, *tmp; nvmet_rdma_remove_one() local
[all...]
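The nvmet RDMA hits show responses being drawn from a preallocated array via a bitmap-allocated tag, with a one-off allocation as a fallback when the pool runs dry, and the tag cleared again on put. The following is a loose userspace model of that idea only; the pool size, field names, and the mutex-protected bitmap are assumptions of this sketch, not the target driver's code.

/* Rough model of a tagged response pool: a bitmap hands out indices into
 * a preallocated array, with a heap fallback when the pool is exhausted. */
#include <pthread.h>
#include <stdlib.h>

#define POOL_SIZE 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

struct pooled_rsp {
    int  tag;        /* index in the pool, or -1 for a fallback allocation */
    char data[256];  /* placeholder for the real response state */
};

struct rsp_pool {
    struct pooled_rsp rsps[POOL_SIZE];
    unsigned long     used[POOL_SIZE / BITS_PER_WORD];
    pthread_mutex_t   lock;
};

static struct pooled_rsp *rsp_get(struct rsp_pool *p)
{
    pthread_mutex_lock(&p->lock);
    for (int i = 0; i < POOL_SIZE; i++) {
        unsigned long bit = 1UL << (i % BITS_PER_WORD);
        unsigned long *word = &p->used[i / BITS_PER_WORD];
        if (!(*word & bit)) {
            *word |= bit;
            pthread_mutex_unlock(&p->lock);
            p->rsps[i].tag = i;
            return &p->rsps[i];
        }
    }
    pthread_mutex_unlock(&p->lock);

    /* Pool exhausted: fall back to a one-off allocation, freed on put. */
    struct pooled_rsp *rsp = calloc(1, sizeof(*rsp));
    if (rsp)
        rsp->tag = -1;
    return rsp;
}

static void rsp_put(struct rsp_pool *p, struct pooled_rsp *rsp)
{
    if (rsp->tag < 0) {          /* fallback allocation */
        free(rsp);
        return;
    }
    pthread_mutex_lock(&p->lock);
    p->used[rsp->tag / BITS_PER_WORD] &= ~(1UL << (rsp->tag % BITS_PER_WORD));
    pthread_mutex_unlock(&p->lock);
}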
/linux/drivers/gpu/drm/msm/
H A Dmsm_submitqueue.c70 struct msm_gpu_submitqueue *queue = container_of(kref, in msm_submitqueue_destroy() local
73 idr_destroy(&queue->fence_idr); in msm_submitqueue_destroy()
75 if (queue->entity == &queue->_vm_bind_entity[0]) in msm_submitqueue_destroy()
76 drm_sched_entity_destroy(queue->entity); in msm_submitqueue_destroy()
78 msm_context_put(queue->ctx); in msm_submitqueue_destroy()
80 kfree(queue); in msm_submitqueue_destroy()
108 struct msm_gpu_submitqueue *queue, *tmp; in msm_submitqueue_close() local
117 list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) { in msm_submitqueue_close()
118 if (queue in msm_submitqueue_close()
171 struct msm_gpu_submitqueue *queue; msm_submitqueue_create() local
278 msm_submitqueue_query_faults(struct msm_gpu_submitqueue * queue,struct drm_msm_submitqueue_query * args) msm_submitqueue_query_faults() argument
301 struct msm_gpu_submitqueue *queue; msm_submitqueue_query() local
[all...]
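msm_submitqueue.c ties each submit queue's lifetime to a kref whose release callback tears down the fence IDR and scheduler entity before freeing the queue. Below is a bare-bones sketch of that get/put pattern using C11 atomics in place of struct kref; the names and fields are invented for illustration.

/* Minimal sketch of reference-counted queue lifetime: the last put runs
 * the destroy path. C11 atomics stand in for struct kref. */
#include <stdatomic.h>
#include <stdlib.h>

struct submitqueue {
    atomic_int refcount;
    int id;
    /* ... fence tracking, scheduler entity, etc. would live here ... */
};

static struct submitqueue *submitqueue_create(int id)
{
    struct submitqueue *q = calloc(1, sizeof(*q));
    if (!q)
        return NULL;
    atomic_init(&q->refcount, 1);   /* caller owns the initial reference */
    q->id = id;
    return q;
}

static void submitqueue_get(struct submitqueue *q)
{
    atomic_fetch_add_explicit(&q->refcount, 1, memory_order_relaxed);
}

/* Drop a reference; the object is destroyed when the count hits zero,
 * mirroring a kref release callback. */
static void submitqueue_put(struct submitqueue *q)
{
    if (atomic_fetch_sub_explicit(&q->refcount, 1, memory_order_acq_rel) == 1) {
        /* last reference gone: tear down per-queue state, then free */
        free(q);
    }
}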
/linux/net/sunrpc/
H A Dsched.c91 * queue->lock and bh_disabled in order to avoid races within
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
101 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
102 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer()
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
109 queue->timer_list.expires = expires; in rpc_set_queue_timer()
114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer()
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue in __rpc_add_timer()
130 rpc_set_waitqueue_priority(struct rpc_wait_queue * queue,int priority) rpc_set_waitqueue_priority() argument
138 rpc_reset_waitqueue_priority(struct rpc_wait_queue * queue) rpc_reset_waitqueue_priority() argument
193 __rpc_add_wait_queue_priority(struct rpc_wait_queue * queue,struct rpc_task * task,unsigned char queue_priority) __rpc_add_wait_queue_priority() argument
205 __rpc_add_wait_queue(struct rpc_wait_queue * queue,struct rpc_task * task,unsigned char queue_priority) __rpc_add_wait_queue() argument
233 __rpc_remove_wait_queue(struct rpc_wait_queue * queue,struct rpc_task * task) __rpc_remove_wait_queue() argument
243 __rpc_init_priority_wait_queue(struct rpc_wait_queue * queue,const char * qname,unsigned char nr_queues) __rpc_init_priority_wait_queue() argument
259 rpc_init_priority_wait_queue(struct rpc_wait_queue * queue,const char * qname) rpc_init_priority_wait_queue() argument
265 rpc_init_wait_queue(struct rpc_wait_queue * queue,const char * qname) rpc_init_wait_queue() argument
271 rpc_destroy_wait_queue(struct rpc_wait_queue * queue) rpc_destroy_wait_queue() argument
511 __rpc_do_wake_up_task_on_wq(struct workqueue_struct * wq,struct rpc_wait_queue * queue,struct rpc_task * task) __rpc_do_wake_up_task_on_wq() argument
532 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct * wq,struct rpc_wait_queue * queue,struct rpc_task * task,bool (* action)(struct rpc_task *,void *),void * data) rpc_wake_up_task_on_wq_queue_action_locked() argument
550 rpc_wake_up_task_queue_locked(struct rpc_wait_queue * queue,struct rpc_task * task) rpc_wake_up_task_queue_locked() argument
560 rpc_wake_up_queued_task(struct rpc_wait_queue * queue,struct rpc_task * task) rpc_wake_up_queued_task() argument
577 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue * queue,struct rpc_task * task,int status) rpc_wake_up_task_queue_set_status_locked() argument
594 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue * queue,struct rpc_task * task,int status) rpc_wake_up_queued_task_set_status() argument
607 __rpc_find_next_queued_priority(struct rpc_wait_queue * queue) __rpc_find_next_queued_priority() argument
654 __rpc_find_next_queued(struct rpc_wait_queue * queue) __rpc_find_next_queued() argument
667 rpc_wake_up_first_on_wq(struct workqueue_struct * wq,struct rpc_wait_queue * queue,bool (* func)(struct rpc_task *,void *),void * data) rpc_wake_up_first_on_wq() argument
685 rpc_wake_up_first(struct rpc_wait_queue * queue,bool (* func)(struct rpc_task *,void *),void * data) rpc_wake_up_first() argument
700 rpc_wake_up_next(struct rpc_wait_queue * queue) rpc_wake_up_next() argument
711 rpc_wake_up_locked(struct rpc_wait_queue * queue) rpc_wake_up_locked() argument
729 rpc_wake_up(struct rpc_wait_queue * queue) rpc_wake_up() argument
742 rpc_wake_up_status_locked(struct rpc_wait_queue * queue,int status) rpc_wake_up_status_locked() argument
761 rpc_wake_up_status(struct rpc_wait_queue * queue,int status) rpc_wake_up_status() argument
771 struct rpc_wait_queue *queue = container_of(work, __rpc_queue_timer_fn() local
861 struct rpc_wait_queue *queue; rpc_signal_task() local
876 struct rpc_wait_queue *queue; rpc_task_try_cancel() local
913 struct rpc_wait_queue *queue; __rpc_execute() local
[all...]
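The sched.c hits suggest two ideas: tasks sleep on wait queues that have priority levels, and a single delayed work item acts as the queue timer, re-armed only when a newly added timeout is earlier than the one already pending (or when the timer list was empty). The following is a simplified, self-contained illustration of both points; all names, the level count, and the time representation are assumptions of this sketch, not the RPC layer's API or semantics.

/* Sketch of a priority wait queue with a shared "earliest deadline" timer. */
#include <stddef.h>
#include <stdint.h>

#define WQ_NR_LEVELS 4            /* priority levels, 0 = highest (assumed) */

struct waiting_task {
    struct waiting_task *next;    /* singly linked per-level FIFO */
    uint64_t timeout;             /* absolute deadline, 0 = none */
    int status;
};

struct prio_wait_queue {
    struct waiting_task *levels[WQ_NR_LEVELS];   /* heads of per-level lists */
    uint64_t timer_expires;       /* earliest pending deadline, 0 = timer idle */
};

/* Queue a task at a given priority and keep the shared timer pointed at
 * the earliest deadline, mirroring the "only re-arm if sooner" check. */
static void wq_add(struct prio_wait_queue *q, struct waiting_task *t,
                   unsigned int level)
{
    if (level >= WQ_NR_LEVELS)
        level = WQ_NR_LEVELS - 1;

    /* append to the tail of the chosen level (FIFO within a level) */
    struct waiting_task **pp = &q->levels[level];
    while (*pp)
        pp = &(*pp)->next;
    t->next = NULL;
    *pp = t;

    if (t->timeout &&
        (q->timer_expires == 0 || t->timeout < q->timer_expires))
        q->timer_expires = t->timeout;   /* a real queue would re-arm work here */
}

/* Wake (dequeue) the first task from the highest non-empty priority level,
 * recording a status for it, loosely like the wake_up_first helpers above. */
static struct waiting_task *wq_wake_first(struct prio_wait_queue *q, int status)
{
    for (unsigned int lvl = 0; lvl < WQ_NR_LEVELS; lvl++) {
        struct waiting_task *t = q->levels[lvl];
        if (t) {
            q->levels[lvl] = t->next;
            t->next = NULL;
            t->status = status;
            return t;
        }
    }
    return NULL;
}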
/linux/drivers/net/wireless/ath/ath5k/
H A Dqcu.c35 * basically we have 10 queues to play with. Each queue has a matching
36 * QCU that controls when the queue will get triggered and multiple QCUs
39 * and DCUs allowing us to have different DFS settings for each queue.
41 * When a frame goes into a TX queue, QCU decides when it'll trigger a
43 its buffer or -if it's a beacon queue- if it's time to fire up the queue
58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
60 * @queue: One of enum ath5k_tx_queue_id
63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument
66 AR5K_ASSERT_ENTRY(queue, a in ath5k_hw_num_tx_pending()
94 ath5k_hw_release_tx_queue(struct ath5k_hw * ah,unsigned int queue) ath5k_hw_release_tx_queue() argument
138 ath5k_hw_get_tx_queueprops(struct ath5k_hw * ah,int queue,struct ath5k_txq_info * queue_info) ath5k_hw_get_tx_queueprops() argument
154 ath5k_hw_set_tx_queueprops(struct ath5k_hw * ah,int queue,const struct ath5k_txq_info * qinfo) ath5k_hw_set_tx_queueprops() argument
206 unsigned int queue; ath5k_hw_setup_tx_queue() local
282 ath5k_hw_set_tx_retry_limits(struct ath5k_hw * ah,unsigned int queue) ath5k_hw_set_tx_retry_limits() argument
324 ath5k_hw_reset_tx_queue(struct ath5k_hw * ah,unsigned int queue) ath5k_hw_reset_tx_queue() argument
[all...]
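The qcu.c comment above describes ten hardware TX queues, each paired with a QCU/DCU and carrying its own properties and retry limits, with setup, get/set, retry-limit, and reset helpers operating on a queue index. Purely as an illustration of that bookkeeping (the queue count comes from the comment; every name, type, and default below is an assumption of this sketch, not the ath5k register interface):

/* Toy model of a fixed set of hardware TX queues with per-queue
 * properties, bounds-checked by queue index. */
#include <stdbool.h>
#include <string.h>

#define NUM_TX_QUEUES 10

enum txq_type { TXQ_INACTIVE = 0, TXQ_DATA, TXQ_BEACON, TXQ_CAB };

struct txq_info {
    enum txq_type type;
    unsigned int  retry_short;   /* short-frame retry limit */
    unsigned int  retry_long;    /* long-frame retry limit */
};

struct tx_hw {
    struct txq_info queues[NUM_TX_QUEUES];
};

/* Claim the first inactive queue for the given type; return its index
 * or -1 when all queues are in use. */
static int txq_setup(struct tx_hw *hw, enum txq_type type)
{
    for (int i = 0; i < NUM_TX_QUEUES; i++) {
        if (hw->queues[i].type == TXQ_INACTIVE) {
            hw->queues[i].type = type;
            hw->queues[i].retry_short = 7;    /* arbitrary defaults */
            hw->queues[i].retry_long = 4;
            return i;
        }
    }
    return -1;
}

/* Copy out the properties of an active queue; false for a bad index. */
static bool txq_get_props(struct tx_hw *hw, int queue, struct txq_info *out)
{
    if (queue < 0 || queue >= NUM_TX_QUEUES ||
        hw->queues[queue].type == TXQ_INACTIVE)
        return false;
    *out = hw->queues[queue];
    return true;
}

/* Release a queue so it can be reused. */
static void txq_release(struct tx_hw *hw, int queue)
{
    if (queue >= 0 && queue < NUM_TX_QUEUES)
        memset(&hw->queues[queue], 0, sizeof(hw->queues[queue]));
}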
