
Searched full:q (Results 1 – 25 of 2778) sorted by relevance


/linux/lib/crypto/
gf128mul.c
57 #define gf128mul_dat(q) { \ argument
58 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
59 q(
[all...]
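
The gf128mul_dat(q) macro above is an x-macro table generator: the same 256-entry byte table can be instantiated with different per-entry expressions by passing a different q. A minimal userspace sketch of the idiom, truncated to eight entries (names and values here are illustrative, not the kernel's):

#include <stdio.h>

/* X-macro table: expands q(x) once per index; truncated to 8 entries. */
#define TABLE_DAT(q) { q(0x00), q(0x01), q(0x02), q(0x03), \
                       q(0x04), q(0x05), q(0x06), q(0x07) }

#define DOUBLE(x) ((x) * 2)     /* one possible per-entry expression */

static const int doubled[8] = TABLE_DAT(DOUBLE);

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("%d ", doubled[i]);      /* prints: 0 2 4 6 8 10 12 14 */
    printf("\n");
    return 0;
}
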
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) in cache_offer() argument
52 struct funeth_rx_cache *c = &q->cache; in cache_offer()
58 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_offer()
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
69 struct funeth_rx_cache *c = &q->cache; in cache_get()
77 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in cache_get()
88 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_get()
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
103 if (cache_get(q, rb)) in funeth_alloc_page()
110 rb->dma_addr = dma_map_page(q in funeth_alloc_page()
127 funeth_free_page(struct funeth_rxq * q,struct funeth_rxbuf * rb) funeth_free_page() argument
141 fun_run_xdp(struct funeth_rxq * q,skb_frag_t * frags,void * buf_va,int ref_ok,struct funeth_txq * xdp_q) fun_run_xdp() argument
256 get_buf(struct funeth_rxq * q,struct funeth_rxbuf * buf,unsigned int len) get_buf() argument
296 fun_gather_pkt(struct funeth_rxq * q,unsigned int tot_len,skb_frag_t * frags) fun_gather_pkt() argument
348 advance_cq(struct funeth_rxq * q) advance_cq() argument
365 fun_handle_cqe_pkt(struct funeth_rxq * q,struct funeth_txq * xdp_q) fun_handle_cqe_pkt() argument
479 fun_process_cqes(struct funeth_rxq * q,int budget) fun_process_cqes() argument
513 struct funeth_rxq *q = irq->rxq; fun_rxq_napi_poll() local
536 fun_rxq_free_bufs(struct funeth_rxq * q) fun_rxq_free_bufs() argument
549 fun_rxq_alloc_bufs(struct funeth_rxq * q,int node) fun_rxq_alloc_bufs() argument
575 fun_rxq_free_cache(struct funeth_rxq * q) fun_rxq_free_cache() argument
587 fun_rxq_set_bpf(struct funeth_rxq * q,struct bpf_prog * prog) fun_rxq_set_bpf() argument
620 struct funeth_rxq *q; fun_rxq_create_sw() local
677 fun_rxq_free_sw(struct funeth_rxq * q) fun_rxq_free_sw() argument
697 fun_rxq_create_dev(struct funeth_rxq * q,struct fun_irq * irq) fun_rxq_create_dev() argument
765 fun_rxq_free_dev(struct funeth_rxq * q) fun_rxq_free_dev() argument
792 struct funeth_rxq *q = *qp; funeth_rxq_create() local
817 funeth_rxq_free(struct funeth_rxq * q,int state) funeth_rxq_free() argument
[all...]
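
cache_offer() and cache_get() above implement a small recycling cache: spent rx pages are parked instead of unmapped, and funeth_alloc_page() tries the cache before mapping a fresh page. A hedged userspace model of the same idea, with a fixed-size ring standing in for the driver's cache (all names invented):

#include <stdbool.h>

struct rxbuf { void *page; };

struct rx_cache {
    struct rxbuf bufs[64];
    unsigned int prod, cons;    /* free-running; capacity is a power of two */
};

/* Park a spent buffer for reuse; the caller frees it if the cache is full. */
static bool cache_offer(struct rx_cache *c, const struct rxbuf *b)
{
    if (c->prod - c->cons == 64)
        return false;
    c->bufs[c->prod++ & 63] = *b;
    return true;
}

/* Reuse a parked buffer if one is available. */
static bool cache_get(struct rx_cache *c, struct rxbuf *b)
{
    if (c->prod == c->cons)
        return false;           /* empty: caller allocates a fresh page */
    *b = c->bufs[c->cons++ & 63];
    return true;
}
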
funeth_tx.c
56 static void *txq_end(const struct funeth_txq *q) in txq_end() argument
58 return (void *)q->hw_wb; in txq_end()
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
66 return txq_end(q) - p; in txq_to_end()
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
90 i < ngle && txq_to_end(q, gle); i++, gle++) in fun_write_gl()
93 if (txq_to_end(q, gle) == 0) { in fun_write_gl()
94 gle = (struct fun_dataop_gl *)q->desc; in fun_write_gl()
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
132 FUN_QSTAT_INC(q, tx_tls_fallbac in fun_tls_tx()
149 write_pkt_desc(struct sk_buff * skb,struct funeth_txq * q,unsigned int tls_len) write_pkt_desc() argument
311 fun_txq_avail(const struct funeth_txq * q) fun_txq_avail() argument
317 fun_tx_check_stop(struct funeth_txq * q) fun_tx_check_stop() argument
338 fun_txq_may_restart(struct funeth_txq * q) fun_txq_may_restart() argument
347 struct funeth_txq *q = fp->txqs[qid]; fun_start_xmit() local
385 txq_hw_head(const struct funeth_txq * q) txq_hw_head() argument
393 fun_unmap_pkt(const struct funeth_txq * q,unsigned int idx) fun_unmap_pkt() argument
423 fun_txq_reclaim(struct funeth_txq * q,int budget) fun_txq_reclaim() argument
471 struct funeth_txq *q = irq->txq; fun_txq_napi_poll() local
484 fun_xdpq_clean(struct funeth_txq * q,unsigned int budget) fun_xdpq_clean() argument
514 fun_xdp_tx(struct funeth_txq * q,struct xdp_frame * xdpf) fun_xdp_tx() argument
575 struct funeth_txq *q, **xdpqs; fun_xdp_xmit_frames() local
601 fun_txq_purge(struct funeth_txq * q) fun_txq_purge() argument
612 fun_xdpq_purge(struct funeth_txq * q) fun_xdpq_purge() argument
629 struct funeth_txq *q; fun_txq_create_sw() local
665 fun_txq_free_sw(struct funeth_txq * q) fun_txq_free_sw() argument
680 fun_txq_create_dev(struct funeth_txq * q,struct fun_irq * irq) fun_txq_create_dev() argument
731 fun_txq_free_dev(struct funeth_txq * q) fun_txq_free_dev() argument
763 struct funeth_txq *q = *qp; funeth_txq_create() local
789 funeth_txq_free(struct funeth_txq * q,int state) funeth_txq_free() argument
[all...]
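
txq_end()/txq_to_end() above bound how far a writer may go before the descriptor ring wraps, and fun_write_gl() restarts at q->desc when the distance reaches zero. A userspace model of that wrap check (the layout is assumed for illustration; entries are sized so the end is hit exactly):

#include <stdint.h>
#include <string.h>

struct txq {
    uint8_t *desc;      /* base of the descriptor ring */
    size_t size;        /* ring size in bytes, a multiple of the entry size */
};

static size_t txq_to_end(const struct txq *q, const uint8_t *p)
{
    return (size_t)((q->desc + q->size) - p);  /* bytes left before the end */
}

/* Write one fixed-size entry, wrapping to the ring base at the boundary. */
static uint8_t *ring_write(struct txq *q, uint8_t *p,
                           const void *entry, size_t len)
{
    if (txq_to_end(q, p) == 0)
        p = q->desc;            /* wrap, as fun_write_gl() does */
    memcpy(p, entry, len);
    return p + len;
}
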
/linux/drivers/gpu/drm/xe/
xe_guc_submit.c
49 exec_queue_to_guc(struct xe_exec_queue *q) in exec_queue_to_guc() argument
51 return &q->gt->uc.guc; in exec_queue_to_guc()
72 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
74 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
77 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
79 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
82 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
84 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
87 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
89 return atomic_read(&q in exec_queue_enabled()
92 set_exec_queue_enabled(struct xe_exec_queue * q) set_exec_queue_enabled() argument
97 clear_exec_queue_enabled(struct xe_exec_queue * q) clear_exec_queue_enabled() argument
102 exec_queue_pending_enable(struct xe_exec_queue * q) exec_queue_pending_enable() argument
107 set_exec_queue_pending_enable(struct xe_exec_queue * q) set_exec_queue_pending_enable() argument
112 clear_exec_queue_pending_enable(struct xe_exec_queue * q) clear_exec_queue_pending_enable() argument
117 exec_queue_pending_disable(struct xe_exec_queue * q) exec_queue_pending_disable() argument
122 set_exec_queue_pending_disable(struct xe_exec_queue * q) set_exec_queue_pending_disable() argument
127 clear_exec_queue_pending_disable(struct xe_exec_queue * q) clear_exec_queue_pending_disable() argument
132 exec_queue_destroyed(struct xe_exec_queue * q) exec_queue_destroyed() argument
137 set_exec_queue_destroyed(struct xe_exec_queue * q) set_exec_queue_destroyed() argument
142 exec_queue_banned(struct xe_exec_queue * q) exec_queue_banned() argument
147 set_exec_queue_banned(struct xe_exec_queue * q) set_exec_queue_banned() argument
152 exec_queue_suspended(struct xe_exec_queue * q) exec_queue_suspended() argument
157 set_exec_queue_suspended(struct xe_exec_queue * q) set_exec_queue_suspended() argument
162 clear_exec_queue_suspended(struct xe_exec_queue * q) clear_exec_queue_suspended() argument
167 exec_queue_reset(struct xe_exec_queue * q) exec_queue_reset() argument
172 set_exec_queue_reset(struct xe_exec_queue * q) set_exec_queue_reset() argument
177 exec_queue_killed(struct xe_exec_queue * q) exec_queue_killed() argument
182 set_exec_queue_killed(struct xe_exec_queue * q) set_exec_queue_killed() argument
187 exec_queue_wedged(struct xe_exec_queue * q) exec_queue_wedged() argument
192 set_exec_queue_wedged(struct xe_exec_queue * q) set_exec_queue_wedged() argument
197 exec_queue_check_timeout(struct xe_exec_queue * q) exec_queue_check_timeout() argument
202 set_exec_queue_check_timeout(struct xe_exec_queue * q) set_exec_queue_check_timeout() argument
207 clear_exec_queue_check_timeout(struct xe_exec_queue * q) clear_exec_queue_check_timeout() argument
212 exec_queue_extra_ref(struct xe_exec_queue * q) exec_queue_extra_ref() argument
217 set_exec_queue_extra_ref(struct xe_exec_queue * q) set_exec_queue_extra_ref() argument
222 exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue * q) exec_queue_killed_or_banned_or_wedged() argument
250 struct xe_exec_queue *q; guc_submit_wedged_fini() local
319 __release_guc_id(struct xe_guc * guc,struct xe_exec_queue * q,u32 xa_count) __release_guc_id() argument
335 alloc_guc_id(struct xe_guc * guc,struct xe_exec_queue * q) alloc_guc_id() argument
371 release_guc_id(struct xe_guc * guc,struct xe_exec_queue * q) release_guc_id() argument
427 init_policies(struct xe_guc * guc,struct xe_exec_queue * q) init_policies() argument
451 set_min_preemption_timeout(struct xe_guc * guc,struct xe_exec_queue * q) set_min_preemption_timeout() argument
470 __register_mlrc_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q,struct guc_ctxt_registration_info * info) __register_mlrc_exec_queue() argument
545 register_exec_queue(struct xe_exec_queue * q) register_exec_queue() argument
599 wq_space_until_wrap(struct xe_exec_queue * q) wq_space_until_wrap() argument
604 wq_wait_for_space(struct xe_exec_queue * q,u32 wqi_size) wq_wait_for_space() argument
632 wq_noop_append(struct xe_exec_queue * q) wq_noop_append() argument
652 wq_item_append(struct xe_exec_queue * q) wq_item_append() argument
697 submit_exec_queue(struct xe_exec_queue * q) submit_exec_queue() argument
752 struct xe_exec_queue *q = job->q; guc_exec_queue_run_job() local
793 MAKE_SCHED_CONTEXT_ACTION(q,enable_disable) global() argument
801 disable_scheduling_deregister(struct xe_guc * guc,struct xe_exec_queue * q) disable_scheduling_deregister() argument
837 xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue * q) xe_guc_exec_queue_trigger_cleanup() argument
861 struct xe_exec_queue *q; xe_guc_submit_wedge() local
908 struct xe_exec_queue *q = ge->q; xe_guc_exec_queue_lr_cleanup() local
966 check_timeout(struct xe_exec_queue * q,struct xe_sched_job * job) check_timeout() argument
1007 enable_scheduling(struct xe_exec_queue * q) enable_scheduling() argument
1036 disable_scheduling(struct xe_exec_queue * q,bool immediate) disable_scheduling() argument
1055 __deregister_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q) __deregister_exec_queue() argument
1079 struct xe_exec_queue *q = job->q; guc_exec_queue_timedout_job() local
1284 struct xe_exec_queue *q = ge->q; __guc_exec_queue_fini_async() local
1307 guc_exec_queue_fini_async(struct xe_exec_queue * q) guc_exec_queue_fini_async() argument
1321 __guc_exec_queue_fini(struct xe_guc * guc,struct xe_exec_queue * q) __guc_exec_queue_fini() argument
1335 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_cleanup() local
1347 guc_exec_queue_allowed_to_change_state(struct xe_exec_queue * q) guc_exec_queue_allowed_to_change_state() argument
1354 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_set_sched_props() local
1362 __suspend_fence_signal(struct xe_exec_queue * q) __suspend_fence_signal() argument
1371 suspend_fence_signal(struct xe_exec_queue * q) suspend_fence_signal() argument
1384 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_suspend() local
1413 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_resume() local
1469 guc_exec_queue_init(struct xe_exec_queue * q) guc_exec_queue_init() argument
1538 guc_exec_queue_kill(struct xe_exec_queue * q) guc_exec_queue_kill() argument
1546 guc_exec_queue_add_msg(struct xe_exec_queue * q,struct xe_sched_msg * msg,u32 opcode) guc_exec_queue_add_msg() argument
1562 guc_exec_queue_try_add_msg(struct xe_exec_queue * q,struct xe_sched_msg * msg,u32 opcode) guc_exec_queue_try_add_msg() argument
1577 guc_exec_queue_fini(struct xe_exec_queue * q) guc_exec_queue_fini() argument
1587 guc_exec_queue_set_priority(struct xe_exec_queue * q,enum xe_exec_queue_priority priority) guc_exec_queue_set_priority() argument
1606 guc_exec_queue_set_timeslice(struct xe_exec_queue * q,u32 timeslice_us) guc_exec_queue_set_timeslice() argument
1624 guc_exec_queue_set_preempt_timeout(struct xe_exec_queue * q,u32 preempt_timeout_us) guc_exec_queue_set_preempt_timeout() argument
1643 guc_exec_queue_suspend(struct xe_exec_queue * q) guc_exec_queue_suspend() argument
1659 guc_exec_queue_suspend_wait(struct xe_exec_queue * q) guc_exec_queue_suspend_wait() argument
1686 guc_exec_queue_resume(struct xe_exec_queue * q) guc_exec_queue_resume() argument
1699 guc_exec_queue_reset_status(struct xe_exec_queue * q) guc_exec_queue_reset_status() argument
1723 guc_exec_queue_stop(struct xe_guc * guc,struct xe_exec_queue * q) guc_exec_queue_stop() argument
1805 struct xe_exec_queue *q; xe_guc_submit_stop() local
1829 guc_exec_queue_start(struct xe_exec_queue * q) guc_exec_queue_start() argument
1848 struct xe_exec_queue *q; xe_guc_submit_start() local
1873 struct xe_exec_queue *q; g2h_exec_queue_lookup() local
1892 deregister_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q) deregister_exec_queue() argument
1909 handle_sched_done(struct xe_guc * guc,struct xe_exec_queue * q,u32 runnable_state) handle_sched_done() argument
1956 struct xe_exec_queue *q; xe_guc_sched_done_handler() local
1983 handle_deregister_done(struct xe_guc * guc,struct xe_exec_queue * q) handle_deregister_done() argument
1997 struct xe_exec_queue *q; xe_guc_deregister_done_handler() local
2025 struct xe_exec_queue *q; xe_guc_exec_queue_reset_handler() local
2087 struct xe_exec_queue *q; xe_guc_exec_queue_memory_cat_error_handler() local
2159 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue * q,struct xe_guc_submit_exec_queue_snapshot * snapshot) guc_exec_queue_wq_snapshot_capture() argument
2217 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue * q) xe_guc_exec_queue_snapshot_capture() argument
2371 guc_exec_queue_print(struct xe_exec_queue * q,struct drm_printer * p) guc_exec_queue_print() argument
2389 struct xe_exec_queue *q; xe_guc_submit_print() local
[all...]
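
The long run of helpers above is a single pattern repeated per flag: the exec queue's lifecycle state lives in one atomic word, and each flag gets test/set/clear accessors built on atomic OR and AND. A compact userspace equivalent using C11 atomics (flag values are invented; the kernel uses atomic_t and its own names):

#include <stdatomic.h>
#include <stdbool.h>

#define STATE_REGISTERED (1u << 0)
#define STATE_ENABLED    (1u << 1)

struct exec_queue { atomic_uint state; };

static bool queue_registered(struct exec_queue *q)
{
    return atomic_load(&q->state) & STATE_REGISTERED;
}

static void set_queue_registered(struct exec_queue *q)
{
    atomic_fetch_or(&q->state, STATE_REGISTERED);
}

static void clear_queue_registered(struct exec_queue *q)
{
    atomic_fetch_and(&q->state, ~STATE_REGISTERED);
}
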
xe_exec_queue.c
37 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
40 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
42 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
43 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
44 if (q->vm) in __xe_exec_queue_free()
45 xe_vm_put(q->vm); in __xe_exec_queue_free()
47 if (q->xef) in __xe_exec_queue_free()
48 xe_file_put(q->xef); in __xe_exec_queue_free()
50 kfree(q); in __xe_exec_queue_free()
59 struct xe_exec_queue *q; __xe_exec_queue_alloc() local
115 __xe_exec_queue_init(struct xe_exec_queue * q) __xe_exec_queue_init() argument
159 struct xe_exec_queue *q; xe_exec_queue_create() local
242 struct xe_exec_queue *q; xe_exec_queue_create_bind() local
273 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); xe_exec_queue_destroy() local
289 xe_exec_queue_fini(struct xe_exec_queue * q) xe_exec_queue_fini() argument
307 xe_exec_queue_assign_name(struct xe_exec_queue * q,u32 instance) xe_exec_queue_assign_name() argument
335 struct xe_exec_queue *q; xe_exec_queue_lookup() local
353 exec_queue_set_priority(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_priority() argument
418 exec_queue_set_timeslice(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_timeslice() argument
435 exec_queue_set_pxp_type(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_pxp_type() argument
461 exec_queue_user_ext_set_property(struct xe_device * xe,struct xe_exec_queue * q,u64 extension) exec_queue_user_ext_set_property() argument
497 exec_queue_user_extensions(struct xe_device * xe,struct xe_exec_queue * q,u64 extensions,int ext_number) exec_queue_user_extensions() argument
593 struct xe_exec_queue *q = NULL; xe_exec_queue_create_ioctl() local
720 struct xe_exec_queue *q; xe_exec_queue_get_property_ioctl() local
750 xe_exec_queue_is_lr(struct xe_exec_queue * q) xe_exec_queue_is_lr() argument
756 xe_exec_queue_num_job_inflight(struct xe_exec_queue * q) xe_exec_queue_num_job_inflight() argument
767 xe_exec_queue_ring_full(struct xe_exec_queue * q) xe_exec_queue_ring_full() argument
789 xe_exec_queue_is_idle(struct xe_exec_queue * q) xe_exec_queue_is_idle() argument
815 xe_exec_queue_update_run_ticks(struct xe_exec_queue * q) xe_exec_queue_update_run_ticks() argument
856 xe_exec_queue_kill(struct xe_exec_queue * q) xe_exec_queue_kill() argument
876 struct xe_exec_queue *q; xe_exec_queue_destroy_ioctl() local
902 xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_lockdep_assert() argument
918 xe_exec_queue_last_fence_put(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_put() argument
931 xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue * q) xe_exec_queue_last_fence_put_unlocked() argument
948 xe_exec_queue_last_fence_get(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_get() argument
975 xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_get_for_resume() argument
1000 xe_exec_queue_last_fence_set(struct xe_exec_queue * q,struct xe_vm * vm,struct dma_fence * fence) xe_exec_queue_last_fence_set() argument
1017 xe_exec_queue_last_fence_test_dep(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_test_dep() argument
[all...]
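
xe_exec_queue_destroy() above recovers the queue from its embedded refcount with container_of() once the last reference is dropped, and __xe_exec_queue_free() then releases what the queue pins (its vm and file). A hedged userspace model of that embedded-refcount pattern:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct exec_queue {
    atomic_uint refcount;
    /* owned resources (vm, file, ...) would live here */
};

static void exec_queue_destroy(atomic_uint *ref)
{
    struct exec_queue *q = container_of(ref, struct exec_queue, refcount);
    /* release owned resources, then free the queue itself */
    free(q);
}

static void exec_queue_put(struct exec_queue *q)
{
    /* old value 1 means this put dropped the count to zero */
    if (atomic_fetch_sub(&q->refcount, 1) == 1)
        exec_queue_destroy(&q->refcount);
}
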
/linux/Documentation/networking/
tls-offload-layers.svg
1 <svg version="1.1" viewBox="0.0 0.0 460.0 500.0" ...> (inline SVG source of the TLS offload layering diagram; path and text-glyph data omitted)
[all...]
/linux/sound/core/seq/oss/
seq_oss_readq.c
35 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
37 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_readq_new()
38 if (!q) in snd_seq_oss_readq_new()
41 q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL); in snd_seq_oss_readq_new()
42 if (!q->q) { in snd_seq_oss_readq_new()
43 kfree(q); in snd_seq_oss_readq_new()
47 q in snd_seq_oss_readq_new()
62 snd_seq_oss_readq_delete(struct seq_oss_readq * q) snd_seq_oss_readq_delete() argument
74 snd_seq_oss_readq_clear(struct seq_oss_readq * q) snd_seq_oss_readq_clear() argument
89 snd_seq_oss_readq_puts(struct seq_oss_readq * q,int dev,unsigned char * data,int len) snd_seq_oss_readq_puts() argument
123 snd_seq_oss_readq_sysex(struct seq_oss_readq * q,int dev,struct snd_seq_event * ev) snd_seq_oss_readq_sysex() argument
141 snd_seq_oss_readq_put_event(struct seq_oss_readq * q,union evrec * ev) snd_seq_oss_readq_put_event() argument
169 snd_seq_oss_readq_pick(struct seq_oss_readq * q,union evrec * rec) snd_seq_oss_readq_pick() argument
181 snd_seq_oss_readq_wait(struct seq_oss_readq * q) snd_seq_oss_readq_wait() argument
193 snd_seq_oss_readq_free(struct seq_oss_readq * q) snd_seq_oss_readq_free() argument
206 snd_seq_oss_readq_poll(struct seq_oss_readq * q,struct file * file,poll_table * wait) snd_seq_oss_readq_poll() argument
216 snd_seq_oss_readq_put_timestamp(struct seq_oss_readq * q,unsigned long curt,int seq_mode) snd_seq_oss_readq_put_timestamp() argument
244 snd_seq_oss_readq_info_read(struct seq_oss_readq * q,struct snd_info_buffer * buf) snd_seq_oss_readq_info_read() argument
[all...]
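
snd_seq_oss_readq_new() above is the standard two-step allocation with unwinding: allocate the queue struct, then its ring of event records, and free the first if the second fails. The same shape in a userspace sketch:

#include <stdlib.h>

struct readq {
    union { long ev; } *q;      /* event ring, standing in for union evrec */
    int maxlen;
};

static struct readq *readq_new(int maxlen)
{
    struct readq *q = calloc(1, sizeof(*q));
    if (!q)
        return NULL;
    q->q = calloc(maxlen, sizeof(*q->q));
    if (!q->q) {
        free(q);                /* unwind the first allocation */
        return NULL;
    }
    q->maxlen = maxlen;
    return q;
}
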
seq_oss_event.c
22 static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
27 static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
42 snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) in snd_seq_oss_process_event() argument
44 switch (q->s.code) { in snd_seq_oss_process_event()
46 return extended_event(dp, q, ev); in snd_seq_oss_process_event()
49 return chn_voice_event(dp, q, ev); in snd_seq_oss_process_event()
52 return chn_common_event(dp, q, ev); in snd_seq_oss_process_event()
55 return timing_event(dp, q, ev); in snd_seq_oss_process_event()
58 return local_event(dp, q, ev); in snd_seq_oss_process_event()
61 return snd_seq_oss_synth_sysex(dp, q in snd_seq_oss_process_event()
95 old_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) old_event() argument
121 extended_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) extended_event() argument
175 chn_voice_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) chn_voice_event() argument
196 chn_common_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) chn_common_event() argument
223 timing_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) timing_event() argument
258 local_event(struct seq_oss_devinfo * dp,union evrec * q,struct snd_seq_event * ev) local_event() argument
[all...]
/linux/net/sched/
sch_choke.c
75 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
81 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
83 return q->flags & TC_RED_ECN; in use_ecn()
87 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
89 return q->flags & TC_RED_HARDDROP; in use_harddrop()
93 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
96 q in choke_zap_head_holes()
103 choke_zap_tail_holes(struct choke_sched_data * q) choke_zap_tail_holes() argument
116 struct choke_sched_data *q = qdisc_priv(sch); choke_drop_by_idx() local
179 choke_peek_random(const struct choke_sched_data * q,unsigned int * pidx) choke_peek_random() argument
199 choke_match_random(const struct choke_sched_data * q,struct sk_buff * nskb,unsigned int * pidx) choke_match_random() argument
215 struct choke_sched_data *q = qdisc_priv(sch); choke_enqueue() local
285 struct choke_sched_data *q = qdisc_priv(sch); choke_dequeue() local
306 struct choke_sched_data *q = qdisc_priv(sch); choke_reset() local
338 struct choke_sched_data *q = qdisc_priv(sch); choke_change() local
433 struct choke_sched_data *q = qdisc_priv(sch); choke_dump() local
462 struct choke_sched_data *q = qdisc_priv(sch); choke_dump_stats() local
475 struct choke_sched_data *q = qdisc_priv(sch); choke_destroy() local
482 struct choke_sched_data *q = qdisc_priv(sch); choke_peek_head() local
[all...]
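
choke_len() above computes occupancy of a power-of-two ring as (tail - head) & mask, which stays correct even when the free-running indices wrap around. A self-contained check of that property:

#include <assert.h>

struct ring { unsigned int head, tail, mask; };  /* mask = size - 1 */

static unsigned int ring_len(const struct ring *q)
{
    return (q->tail - q->head) & q->mask;
}

int main(void)
{
    /* head is about to wrap past 2^32; the masked difference is still 4 */
    struct ring q = { .head = 0xfffffffeu, .tail = 2, .mask = 255 };
    assert(ring_len(&q) == 4);
    return 0;
}
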
sch_netem.c
210 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
212 struct clgstate *clg = &q->clg; in loss_4state()
213 u32 rnd = prandom_u32_state(&q->prng.prng_state); in loss_4state()
275 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
277 struct clgstate *clg = &q->clg; in loss_gilb_ell()
278 struct rnd_state *s = &q->prng.prng_state; in loss_gilb_ell()
297 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
299 switch (q->loss_model) { in loss_event()
302 return q->loss && q in loss_event()
357 packet_time_ns(u64 len,const struct netem_sched_data * q) packet_time_ns() argument
374 struct netem_sched_data *q = qdisc_priv(sch); tfifo_reset() local
393 struct netem_sched_data *q = qdisc_priv(sch); tfifo_enqueue() local
451 struct netem_sched_data *q = qdisc_priv(sch); netem_enqueue() local
654 get_slot_next(struct netem_sched_data * q,u64 now) get_slot_next() argument
673 netem_peek(struct netem_sched_data * q) netem_peek() argument
690 netem_erase_head(struct netem_sched_data * q,struct sk_buff * skb) netem_erase_head() argument
703 struct netem_sched_data *q = qdisc_priv(sch); netem_dequeue() local
787 struct netem_sched_data *q = qdisc_priv(sch); netem_reset() local
828 get_slot(struct netem_sched_data * q,const struct nlattr * attr) get_slot() argument
850 get_correlation(struct netem_sched_data * q,const struct nlattr * attr) get_correlation() argument
859 get_reorder(struct netem_sched_data * q,const struct nlattr * attr) get_reorder() argument
867 get_corrupt(struct netem_sched_data * q,const struct nlattr * attr) get_corrupt() argument
875 get_rate(struct netem_sched_data * q,const struct nlattr * attr) get_rate() argument
889 get_loss_clg(struct netem_sched_data * q,const struct nlattr * attr) get_loss_clg() argument
981 struct Qdisc *root, *q; check_netem_in_tree() local
1015 struct netem_sched_data *q = qdisc_priv(sch); netem_change() local
1131 struct netem_sched_data *q = qdisc_priv(sch); netem_init() local
1148 struct netem_sched_data *q = qdisc_priv(sch); netem_destroy() local
1157 dump_loss_model(const struct netem_sched_data * q,struct sk_buff * skb) dump_loss_model() argument
1209 const struct netem_sched_data *q = qdisc_priv(sch); netem_dump() local
1296 struct netem_sched_data *q = qdisc_priv(sch); netem_dump_class() local
1310 struct netem_sched_data *q = qdisc_priv(sch); netem_graft() local
1318 struct netem_sched_data *q = qdisc_priv(sch); netem_leaf() local
[all...]
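
loss_4state() and loss_gilb_ell() above draw from small Markov chains to decide whether a packet is lost, so drops arrive in realistic bursts rather than independently. A simplified two-state Gilbert-Elliott model in userspace (the probabilities and the loss-free good state are illustrative simplifications, not netem's exact chain):

#include <stdbool.h>
#include <stdlib.h>

struct gilbert {
    bool bad;           /* current state: good, or bad (bursty) */
    double p_gb;        /* P(good -> bad) per packet */
    double p_bg;        /* P(bad -> good) per packet */
    double loss_bad;    /* drop probability while in the bad state */
};

static double frand(void) { return (double)rand() / RAND_MAX; }

static bool loss_event(struct gilbert *g)
{
    bool lost = g->bad && frand() < g->loss_bad;

    if (g->bad) {
        if (frand() < g->p_bg)
            g->bad = false;
    } else if (frand() < g->p_gb) {
        g->bad = true;
    }
    return lost;        /* simplified: no loss in the good state */
}
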
sch_sfq.c
143 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
146 return &q->slots[val].dep; in sfq_dep_head()
147 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
150 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
153 return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); in sfq_hash()
159 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
166 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
169 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
171 return sfq_hash(q, sk in sfq_classify()
196 sfq_link(struct sfq_sched_data * q,sfq_index x) sfq_link() argument
212 sfq_unlink(q,x,n,p) global() argument
221 sfq_dec(struct sfq_sched_data * q,sfq_index x) sfq_dec() argument
234 sfq_inc(struct sfq_sched_data * q,sfq_index x) sfq_inc() argument
288 struct sfq_sched_data *q = qdisc_priv(sch); sfq_drop() local
325 sfq_prob_mark(const struct sfq_sched_data * q) sfq_prob_mark() argument
331 sfq_hard_mark(const struct sfq_sched_data * q) sfq_hard_mark() argument
336 sfq_headdrop(const struct sfq_sched_data * q) sfq_headdrop() argument
344 struct sfq_sched_data *q = qdisc_priv(sch); sfq_enqueue() local
478 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dequeue() local
533 struct sfq_sched_data *q = qdisc_priv(sch); sfq_rehash() local
603 struct sfq_sched_data *q = timer_container_of(q, t, perturb_timer); sfq_perturbation() local
630 struct sfq_sched_data *q = qdisc_priv(sch); sfq_change() local
764 struct sfq_sched_data *q = qdisc_priv(sch); sfq_destroy() local
777 struct sfq_sched_data *q = qdisc_priv(sch); sfq_init() local
832 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dump() local
883 sfq_unbind(struct Qdisc * q,unsigned long cl) sfq_unbind() argument
890 struct sfq_sched_data *q = qdisc_priv(sch); sfq_tcf_block() local
907 struct sfq_sched_data *q = qdisc_priv(sch); sfq_dump_class_stats() local
926 struct sfq_sched_data *q = qdisc_priv(sch); sfq_walk() local
[all...]
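
sfq_hash() above folds a perturbation key into the flow hash before masking it down to a power-of-two divisor; the key is re-randomized by a timer (see sfq_perturbation above) so adversarial flows cannot permanently collide into one bucket. A toy model of the perturbed bucket choice (the mixer merely stands in for skb_get_hash_perturb()):

#include <stdint.h>

/* Toy 32-bit mixer standing in for skb_get_hash_perturb(). */
static uint32_t hash_mix(uint32_t flow_key, uint32_t perturb)
{
    uint32_t h = flow_key ^ perturb;

    h *= 0x9e3779b1u;   /* golden-ratio multiply */
    h ^= h >> 16;
    return h;
}

/* divisor must be a power of two, as sfq requires. */
static uint32_t sfq_bucket(uint32_t flow_key, uint32_t perturb,
                           uint32_t divisor)
{
    return hash_mix(flow_key, perturb) & (divisor - 1);
}
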
sch_sfb.c
123 static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) in increment_one_qlen() argument
126 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in increment_one_qlen()
138 static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) in increment_qlen() argument
144 increment_one_qlen(sfbhash, 0, q); in increment_qlen()
148 increment_one_qlen(sfbhash, 1, q); in increment_qlen()
152 struct sfb_sched_data *q) in decrement_one_qlen() argument
155 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in decrement_one_qlen()
167 static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in decrement_qlen() argument
173 decrement_one_qlen(sfbhash, 0, q); in decrement_qlen()
177 decrement_one_qlen(sfbhash, 1, q); in decrement_qlen()
180 decrement_prob(struct sfb_bucket * b,struct sfb_sched_data * q) decrement_prob() argument
185 increment_prob(struct sfb_bucket * b,struct sfb_sched_data * q) increment_prob() argument
190 sfb_zero_all_buckets(struct sfb_sched_data * q) sfb_zero_all_buckets() argument
198 sfb_compute_qlen(u32 * prob_r,u32 * avgpm_r,const struct sfb_sched_data * q) sfb_compute_qlen() argument
218 sfb_init_perturbation(u32 slot,struct sfb_sched_data * q) sfb_init_perturbation() argument
224 sfb_swap_slot(struct sfb_sched_data * q) sfb_swap_slot() argument
234 sfb_rate_limit(struct sk_buff * skb,struct sfb_sched_data * q) sfb_rate_limit() argument
284 struct sfb_sched_data *q = qdisc_priv(sch); sfb_enqueue() local
430 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dequeue() local
448 struct sfb_sched_data *q = qdisc_priv(sch); sfb_peek() local
458 struct sfb_sched_data *q = qdisc_priv(sch); sfb_reset() local
470 struct sfb_sched_data *q = qdisc_priv(sch); sfb_destroy() local
495 struct sfb_sched_data *q = qdisc_priv(sch); sfb_change() local
558 struct sfb_sched_data *q = qdisc_priv(sch); sfb_init() local
571 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dump() local
600 struct sfb_sched_data *q = qdisc_priv(sch); sfb_dump_stats() local
624 struct sfb_sched_data *q = qdisc_priv(sch); sfb_graft() local
635 struct sfb_sched_data *q = qdisc_priv(sch); sfb_leaf() local
672 struct sfb_sched_data *q = qdisc_priv(sch); sfb_tcf_block() local
[all...]
sch_dualpi2.c
108 u64 last_qdelay; /* Q delay val at the last probability update */
147 static u64 head_enqueue_time(struct Qdisc *q) in head_enqueue_time() argument
149 struct sk_buff *skb = qdisc_peek_head(q); in head_enqueue_time()
170 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q) in next_pi2_timeout() argument
172 return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate); in next_pi2_timeout()
185 static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q) in skb_apply_step() argument
187 return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step; in skb_apply_step()
190 static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb) in dualpi2_mark() argument
193 q in dualpi2_mark()
199 dualpi2_reset_c_protection(struct dualpi2_sched_data * q) dualpi2_reset_c_protection() argument
209 dualpi2_calculate_c_protection(struct Qdisc * sch,struct dualpi2_sched_data * q,u32 wc) dualpi2_calculate_c_protection() argument
230 dualpi2_classic_marking(struct dualpi2_sched_data * q,struct sk_buff * skb,u32 prob,bool overload) dualpi2_classic_marking() argument
253 dualpi2_scalable_marking(struct dualpi2_sched_data * q,struct sk_buff * skb,u64 local_l_prob,u32 prob,bool overload) dualpi2_scalable_marking() argument
282 must_drop(struct Qdisc * sch,struct dualpi2_sched_data * q,struct sk_buff * skb) must_drop() argument
341 dualpi2_skb_classify(struct dualpi2_sched_data * q,struct sk_buff * skb) dualpi2_skb_classify() argument
388 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_enqueue_skb() local
444 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_qdisc_enqueue() local
517 dequeue_packet(struct Qdisc * sch,struct dualpi2_sched_data * q,int * credit_change,u64 now) dequeue_packet() argument
551 do_step_aqm(struct dualpi2_sched_data * q,struct sk_buff * skb,u64 now) do_step_aqm() argument
574 drop_and_retry(struct dualpi2_sched_data * q,struct sk_buff * skb,struct Qdisc * sch,enum skb_drop_reason reason) drop_and_retry() argument
585 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_qdisc_dequeue() local
626 get_queue_delays(struct dualpi2_sched_data * q,u64 * qdelay_c,u64 * qdelay_l) get_queue_delays() argument
641 struct dualpi2_sched_data *q = qdisc_priv(sch); calculate_probability() local
711 struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer); dualpi2_timer() local
762 struct dualpi2_sched_data *q; dualpi2_change() local
890 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_reset_default() local
916 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_init() local
946 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_dump() local
1022 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_dump_stats() local
1049 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_reset() local
1068 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_destroy() local
1093 dualpi2_unbind(struct Qdisc * q,unsigned long cl) dualpi2_unbind() argument
1100 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_tcf_block() local
[all...]
sch_red.c
55 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
57 return q->flags & TC_RED_ECN; in red_use_ecn()
60 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
62 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
65 static int red_use_nodrop(struct red_sched_data *q) in red_use_nodrop() argument
67 return q->flags & TC_RED_NODROP; in red_use_nodrop()
74 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
75 struct Qdisc *child = q->qdisc; in red_enqueue()
79 q->vars.qavg = red_calc_qavg(&q in red_enqueue()
155 struct red_sched_data *q = qdisc_priv(sch); red_dequeue() local
172 struct red_sched_data *q = qdisc_priv(sch); red_peek() local
180 struct red_sched_data *q = qdisc_priv(sch); red_reset() local
188 struct red_sched_data *q = qdisc_priv(sch); red_offload() local
217 struct red_sched_data *q = qdisc_priv(sch); red_destroy() local
240 struct red_sched_data *q = qdisc_priv(sch); __red_change() local
324 struct red_sched_data *q = timer_container_of(q, t, adapt_timer); red_adaptative_timer() local
340 struct red_sched_data *q = qdisc_priv(sch); red_init() local
374 struct red_sched_data *q = qdisc_priv(sch); red_change() local
413 struct red_sched_data *q = qdisc_priv(sch); red_dump() local
450 struct red_sched_data *q = qdisc_priv(sch); red_dump_stats() local
476 struct red_sched_data *q = qdisc_priv(sch); red_dump_class() local
501 struct red_sched_data *q = qdisc_priv(sch); red_graft() local
514 struct red_sched_data *q = qdisc_priv(sch); red_leaf() local
[all...]
sch_fq_pie.c
75 static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q, in fq_pie_hash() argument
78 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_pie_hash()
84 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_classify() local
91 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_pie_classify()
94 filter = rcu_dereference_bh(q->filter_list); in fq_pie_classify()
96 return fq_pie_hash(q, skb) + 1; in fq_pie_classify()
112 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_pie_classify()
134 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_qdisc_enqueue() local
152 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
155 memory_limited = q in fq_pie_qdisc_enqueue()
240 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_qdisc_dequeue() local
290 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_change() local
387 struct fq_pie_sched_data *q = timer_container_of(q, t, adapt_timer); fq_pie_timer() local
421 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_init() local
474 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_dump() local
510 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_dump_stats() local
535 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_reset() local
554 struct fq_pie_sched_data *q = qdisc_priv(sch); fq_pie_destroy() local
[all...]
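
fq_pie_hash() above maps a 32-bit flow hash onto flows_cnt buckets with reciprocal_scale(), which replaces a modulo with the high half of a 32x32-bit multiply and so allows a bucket count that is not a power of two. The scaling step itself, mirroring the kernel helper of the same name:

#include <stdint.h>

/* Multiplicative range reduction: returns a value in [0, n). */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t n)
{
    return (uint32_t)(((uint64_t)hash * n) >> 32);
}
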
/linux/sound/core/seq/
seq_queue.c
50 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
57 queue_list[i] = q; in queue_list_add()
58 q->queue = i; in queue_list_add()
68 struct snd_seq_queue *q; in queue_list_remove() local
71 q = queue_list[id]; in queue_list_remove()
72 if (q) { in queue_list_remove()
73 guard(spinlock)(&q->owner_lock); in queue_list_remove()
74 if (q->owner == client) { in queue_list_remove()
76 q->klocked = 1; in queue_list_remove()
79 return q; in queue_list_remove()
90 struct snd_seq_queue *q; queue_new() local
121 queue_delete(struct snd_seq_queue * q) queue_delete() argument
162 struct snd_seq_queue *q; snd_seq_queue_alloc() local
181 struct snd_seq_queue *q; snd_seq_queue_delete() local
197 struct snd_seq_queue *q; queueptr() local
212 struct snd_seq_queue *q; snd_seq_queue_find_name() local
230 snd_seq_check_queue(struct snd_seq_queue * q,int atomic,int hop) snd_seq_check_queue() argument
289 struct snd_seq_queue *q; snd_seq_enqueue_event() local
340 check_access(struct snd_seq_queue * q,int client) check_access() argument
348 queue_access_lock(struct snd_seq_queue * q,int client) queue_access_lock() argument
360 queue_access_unlock(struct snd_seq_queue * q) queue_access_unlock() argument
369 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_check_access() local
387 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_set_owner() local
453 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_timer_set_tempo() local
517 struct snd_seq_queue *q; snd_seq_queue_is_used() local
538 struct snd_seq_queue *q; snd_seq_queue_client_leave() local
571 struct snd_seq_queue *q; snd_seq_queue_remove_cells() local
592 queue_broadcast_event(struct snd_seq_queue * q,struct snd_seq_event * ev,int atomic,int hop) queue_broadcast_event() argument
615 snd_seq_queue_process_event(struct snd_seq_queue * q,struct snd_seq_event * ev,int atomic,int hop) snd_seq_queue_process_event() argument
670 struct snd_seq_queue *q; snd_seq_control_queue() local
700 struct snd_seq_queue *q; snd_seq_info_queues_read() local
[all...]
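
queue_list_remove() above uses the kernel's scope-based guard(spinlock)(), which takes the lock at the declaration and releases it automatically when the enclosing scope exits. Userspace can model that with the GCC/Clang cleanup attribute (a sketch of the idea, not the kernel's cleanup.h machinery):

#include <pthread.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* Lock now; unlock automatically when the variable goes out of scope. */
#define scoped_guard_mutex(m)                                      \
    pthread_mutex_t *_guard                                        \
        __attribute__((cleanup(unlock_cleanup), unused)) =         \
            (pthread_mutex_lock(m), (m))

static int klocked;

static void mark_locked(pthread_mutex_t *owner_lock)
{
    scoped_guard_mutex(owner_lock);
    klocked = 1;        /* lock dropped on return, even on early exits */
}
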
/linux/net/xdp/
xsk_queue.h
120 static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) in __xskq_cons_read_addr_unchecked() argument
122 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in __xskq_cons_read_addr_unchecked()
123 u32 idx = cached_cons & q->ring_mask; in __xskq_cons_read_addr_unchecked()
128 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_read_addr_unchecked() argument
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
131 __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); in xskq_cons_read_addr_unchecked()
192 static inline bool xskq_has_descs(struct xsk_queue *q) in xskq_has_descs() argument
194 return q in xskq_has_descs()
197 xskq_cons_is_valid_desc(struct xsk_queue * q,struct xdp_desc * d,struct xsk_buff_pool * pool) xskq_cons_is_valid_desc() argument
208 xskq_cons_read_desc(struct xsk_queue * q,struct xdp_desc * desc,struct xsk_buff_pool * pool) xskq_cons_read_desc() argument
224 xskq_cons_release_n(struct xsk_queue * q,u32 cnt) xskq_cons_release_n() argument
229 parse_desc(struct xsk_queue * q,struct xsk_buff_pool * pool,struct xdp_desc * desc,struct parsed_desc * parsed) parse_desc() argument
237 xskq_cons_read_desc_batch(struct xsk_queue * q,struct xsk_buff_pool * pool,u32 max) xskq_cons_read_desc_batch() argument
279 __xskq_cons_release(struct xsk_queue * q) __xskq_cons_release() argument
284 __xskq_cons_peek(struct xsk_queue * q) __xskq_cons_peek() argument
290 xskq_cons_get_entries(struct xsk_queue * q) xskq_cons_get_entries() argument
296 xskq_cons_nb_entries(struct xsk_queue * q,u32 max) xskq_cons_nb_entries() argument
309 xskq_cons_peek_addr_unchecked(struct xsk_queue * q,u64 * addr) xskq_cons_peek_addr_unchecked() argument
316 xskq_cons_peek_desc(struct xsk_queue * q,struct xdp_desc * desc,struct xsk_buff_pool * pool) xskq_cons_peek_desc() argument
329 xskq_cons_release(struct xsk_queue * q) xskq_cons_release() argument
334 xskq_cons_cancel_n(struct xsk_queue * q,u32 cnt) xskq_cons_cancel_n() argument
339 xskq_cons_present_entries(struct xsk_queue * q) xskq_cons_present_entries() argument
347 xskq_prod_nb_free(struct xsk_queue * q,u32 max) xskq_prod_nb_free() argument
361 xskq_prod_is_full(struct xsk_queue * q) xskq_prod_is_full() argument
366 xskq_prod_cancel_n(struct xsk_queue * q,u32 cnt) xskq_prod_cancel_n() argument
371 xskq_prod_reserve(struct xsk_queue * q) xskq_prod_reserve() argument
381 xskq_prod_reserve_addr(struct xsk_queue * q,u64 addr) xskq_prod_reserve_addr() argument
393 xskq_prod_write_addr_batch(struct xsk_queue * q,struct xdp_desc * descs,u32 nb_entries) xskq_prod_write_addr_batch() argument
406 xskq_prod_reserve_desc(struct xsk_queue * q,u64 addr,u32 len,u32 flags) xskq_prod_reserve_desc() argument
424 __xskq_prod_submit(struct xsk_queue * q,u32 idx) __xskq_prod_submit() argument
429 xskq_prod_submit(struct xsk_queue * q) xskq_prod_submit() argument
434 xskq_prod_submit_n(struct xsk_queue * q,u32 nb_entries) xskq_prod_submit_n() argument
439 xskq_prod_is_empty(struct xsk_queue * q) xskq_prod_is_empty() argument
447 xskq_nb_invalid_descs(struct xsk_queue * q) xskq_nb_invalid_descs() argument
452 xskq_nb_queue_empty_descs(struct xsk_queue * q) xskq_nb_queue_empty_descs() argument
[all...]
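
The xsk_queue helpers above work on cached copies of the shared ring indices: the consumer compares cached_cons against cached_prod and only re-reads the producer's shared index when the ring looks empty, keeping the hot path off the shared cache line. A hedged single-producer/single-consumer sketch with C11 atomics (layout and names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 256           /* power of two */

struct xskq {
    _Atomic uint32_t producer;  /* written by the producer side */
    uint32_t cached_prod;       /* consumer's local snapshot */
    uint32_t cached_cons;       /* consumer's local cursor */
    uint64_t ring[RING_SIZE];
};

static bool cons_peek_addr(struct xskq *q, uint64_t *addr)
{
    if (q->cached_cons == q->cached_prod) {
        /* looks empty: refresh the snapshot from shared memory */
        q->cached_prod = atomic_load_explicit(&q->producer,
                                              memory_order_acquire);
        if (q->cached_cons == q->cached_prod)
            return false;
    }
    *addr = q->ring[q->cached_cons & (RING_SIZE - 1)];
    return true;
}
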
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
47 #define dprintk(q, level, fmt, arg...) \ argument
50 pr_info("[%s] %s: " fmt, (q)->name, __func__, \
103 #define log_qop(q, op) \ argument
104 dprintk(q, 2, "call_qop(%s)%s\n", #op, \
105 (q)->ops->op ? "" : " (nop)")
107 #define call_qop(q, op, args...) \ argument
111 log_qop(q, op); \
112 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
114 (q)
118 call_void_qop(q,op,args...) global() argument
167 call_qop(q,op,args...) global() argument
170 call_void_qop(q,op,args...) global() argument
187 call_bufop(q,op,args...) global() argument
195 call_void_bufop(q,op,args...) global() argument
225 struct vb2_queue *q = vb->vb2_queue; __vb2_buf_mem_alloc() local
380 struct vb2_queue *q = vb->vb2_queue; __setup_offsets() local
408 init_buffer_cache_hints(struct vb2_queue * q,struct vb2_buffer * vb) init_buffer_cache_hints() argument
435 vb2_queue_add_buffer(struct vb2_queue * q,struct vb2_buffer * vb,unsigned int index) vb2_queue_add_buffer() argument
465 __vb2_queue_alloc(struct vb2_queue * q,enum vb2_memory memory,unsigned int num_buffers,unsigned int num_planes,const unsigned int plane_sizes[VB2_MAX_PLANES],unsigned int * first_index) __vb2_queue_alloc() argument
559 __vb2_free_mem(struct vb2_queue * q,unsigned int start,unsigned int count) __vb2_free_mem() argument
584 __vb2_queue_free(struct vb2_queue * q,unsigned int start,unsigned int count) __vb2_queue_free() argument
700 vb2_buffer_in_use(struct vb2_queue * q,struct vb2_buffer * vb) vb2_buffer_in_use() argument
722 __buffers_in_use(struct vb2_queue * q) __buffers_in_use() argument
737 vb2_core_querybuf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb) vb2_core_querybuf() argument
747 __verify_userptr_ops(struct vb2_queue * q) __verify_userptr_ops() argument
760 __verify_mmap_ops(struct vb2_queue * q) __verify_mmap_ops() argument
773 __verify_dmabuf_ops(struct vb2_queue * q) __verify_dmabuf_ops() argument
783 vb2_verify_memory_type(struct vb2_queue * q,enum vb2_memory memory,unsigned int type) vb2_verify_memory_type() argument
829 set_queue_coherency(struct vb2_queue * q,bool non_coherent_mem) set_queue_coherency() argument
838 verify_coherency_flags(struct vb2_queue * q,bool non_coherent_mem) verify_coherency_flags() argument
847 vb2_core_allocated_buffers_storage(struct vb2_queue * q) vb2_core_allocated_buffers_storage() argument
865 vb2_core_free_buffers_storage(struct vb2_queue * q) vb2_core_free_buffers_storage() argument
873 vb2_core_reqbufs(struct vb2_queue * q,enum vb2_memory memory,unsigned int flags,unsigned int * count) vb2_core_reqbufs() argument
1038 vb2_core_create_bufs(struct vb2_queue * q,enum vb2_memory memory,unsigned int flags,unsigned int * count,unsigned int requested_planes,const unsigned int requested_sizes[],unsigned int * first_index) vb2_core_create_bufs() argument
1184 struct vb2_queue *q = vb->vb2_queue; vb2_buffer_done() local
1238 vb2_discard_done(struct vb2_queue * q) vb2_discard_done() argument
1268 struct vb2_queue *q = vb->vb2_queue; __prepare_userptr() local
1384 struct vb2_queue *q = vb->vb2_queue; __prepare_dmabuf() local
1539 struct vb2_queue *q = vb->vb2_queue; __enqueue_in_driver() local
1551 struct vb2_queue *q = vb->vb2_queue; __buf_prepare() local
1699 vb2_core_prepare_buf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb) vb2_core_prepare_buf() argument
1726 vb2_core_remove_bufs(struct vb2_queue * q,unsigned int start,unsigned int count) vb2_core_remove_bufs() argument
1775 vb2_start_streaming(struct vb2_queue * q) vb2_start_streaming() argument
1831 vb2_core_qbuf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb,struct media_request * req) vb2_core_qbuf() argument
1988 __vb2_wait_for_done_vb(struct vb2_queue * q,int nonblocking) __vb2_wait_for_done_vb() argument
2076 __vb2_get_done_vb(struct vb2_queue * q,struct vb2_buffer ** vb,void * pb,int nonblocking) __vb2_get_done_vb() argument
2109 vb2_wait_for_all_buffers(struct vb2_queue * q) vb2_wait_for_all_buffers() argument
2127 struct vb2_queue *q = vb->vb2_queue; __vb2_dqbuf() local
2138 vb2_core_dqbuf(struct vb2_queue * q,unsigned int * pindex,void * pb,bool nonblocking) vb2_core_dqbuf() argument
2202 __vb2_queue_cancel(struct vb2_queue * q) __vb2_queue_cancel() argument
2313 vb2_core_streamon(struct vb2_queue * q,unsigned int type) vb2_core_streamon() argument
2364 vb2_queue_error(struct vb2_queue * q) vb2_queue_error() argument
2372 vb2_core_streamoff(struct vb2_queue * q,unsigned int type) vb2_core_streamoff() argument
2400 __find_plane_by_offset(struct vb2_queue * q,unsigned long offset,struct vb2_buffer ** vb,unsigned int * plane) __find_plane_by_offset() argument
2434 vb2_core_expbuf(struct vb2_queue * q,int * fd,unsigned int type,struct vb2_buffer * vb,unsigned int plane,unsigned int flags) vb2_core_expbuf() argument
2499 vb2_mmap(struct vb2_queue * q,struct vm_area_struct * vma) vb2_mmap() argument
2569 vb2_get_unmapped_area(struct vb2_queue * q,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags) vb2_get_unmapped_area() argument
2602 vb2_core_queue_init(struct vb2_queue * q) vb2_core_queue_init() argument
2690 vb2_core_queue_release(struct vb2_queue * q) vb2_core_queue_release() argument
2702 vb2_core_poll(struct vb2_queue * q,struct file * file,poll_table * wait) vb2_core_poll() argument
2851 __vb2_init_fileio(struct vb2_queue * q,int read) __vb2_init_fileio() argument
2985 __vb2_cleanup_fileio(struct vb2_queue * q) __vb2_cleanup_fileio() argument
3009 __vb2_perform_fileio(struct vb2_queue * q,char __user * data,size_t count,loff_t * ppos,int nonblock,int read) __vb2_perform_fileio() argument
3173 vb2_read(struct vb2_queue * q,char __user * data,size_t count,loff_t * ppos,int nonblocking) vb2_read() argument
3180 vb2_write(struct vb2_queue * q,const char __user * data,size_t count,loff_t * ppos,int nonblocking) vb2_write() argument
3197 struct vb2_queue *q = data; vb2_thread() local
3275 vb2_thread_start(struct vb2_queue * q,vb2_thread_fnc fnc,void * priv,const char * thread_name) vb2_thread_start() argument
3315 vb2_thread_stop(struct vb2_queue * q) vb2_thread_stop() argument
[all...]
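
call_qop()/log_qop() above wrap every driver callback so that a missing op degrades to a harmless no-op and every invocation is traced with its name. A userspace model of the optional-callback macro (uses the GCC/Clang statement-expression extension, as the kernel does; names are illustrative):

#include <stdio.h>

struct ops { int (*start)(void *priv); };
struct queue { const char *name; const struct ops *ops; };

#define call_qop(q, op, ...) ({                                    \
    printf("[%s] call_qop(%s)%s\n", (q)->name, #op,                \
           (q)->ops->op ? "" : " (nop)");                          \
    (q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0;                  \
})

static int start_impl(void *priv) { (void)priv; return 0; }

int main(void)
{
    struct ops ops = { .start = start_impl };
    struct queue q = { .name = "cap0", .ops = &ops };

    return call_qop(&q, start, NULL);   /* logs, then calls start_impl */
}
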
/linux/drivers/net/wireless/mediatek/mt76/
dma.c
189 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
191 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
192 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
195 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
196 q->head = Q_READ(q, dma_id in mt76_dma_sync_idx()
200 __mt76_dma_queue_reset(struct mt76_dev * dev,struct mt76_queue * q,bool reset_idx) __mt76_dma_queue_reset() argument
221 mt76_dma_queue_reset(struct mt76_dev * dev,struct mt76_queue * q) mt76_dma_queue_reset() argument
227 mt76_dma_add_rx_buf(struct mt76_dev * dev,struct mt76_queue * q,struct mt76_queue_buf * buf,void * data) mt76_dma_add_rx_buf() argument
285 mt76_dma_add_buf(struct mt76_dev * dev,struct mt76_queue * q,struct mt76_queue_buf * buf,int nbufs,u32 info,struct sk_buff * skb,void * txwi) mt76_dma_add_buf() argument
354 mt76_dma_tx_cleanup_idx(struct mt76_dev * dev,struct mt76_queue * q,int idx,struct mt76_queue_entry * prev_e) mt76_dma_tx_cleanup_idx() argument
375 mt76_dma_kick_queue(struct mt76_dev * dev,struct mt76_queue * q) mt76_dma_kick_queue() argument
382 mt76_dma_tx_cleanup(struct mt76_dev * dev,struct mt76_queue * q,bool flush) mt76_dma_tx_cleanup() argument
422 mt76_dma_get_buf(struct mt76_dev * dev,struct mt76_queue * q,int idx,int * len,u32 * info,bool * more,bool * drop) mt76_dma_get_buf() argument
476 mt76_dma_dequeue(struct mt76_dev * dev,struct mt76_queue * q,bool flush,int * len,u32 * info,bool * more,bool * drop) mt76_dma_dequeue() argument
502 mt76_dma_tx_queue_skb_raw(struct mt76_dev * dev,struct mt76_queue * q,struct sk_buff * skb,u32 tx_info) mt76_dma_tx_queue_skb_raw() argument
535 mt76_dma_tx_queue_skb(struct mt76_phy * phy,struct mt76_queue * q,enum mt76_txq_id qid,struct sk_buff * skb,struct mt76_wcid * wcid,struct ieee80211_sta * sta) mt76_dma_tx_queue_skb() argument
635 mt76_dma_rx_fill_buf(struct mt76_dev * dev,struct mt76_queue * q,bool allow_direct) mt76_dma_rx_fill_buf() argument
674 mt76_dma_rx_fill(struct mt76_dev * dev,struct mt76_queue * q,bool allow_direct) mt76_dma_rx_fill() argument
690 mt76_dma_alloc_queue(struct mt76_dev * dev,struct mt76_queue * q,int idx,int n_desc,int bufsize,u32 ring_base) mt76_dma_alloc_queue() argument
749 mt76_dma_rx_cleanup(struct mt76_dev * dev,struct mt76_queue * q) mt76_dma_rx_cleanup() argument
781 struct mt76_queue *q = &dev->q_rx[qid]; mt76_dma_rx_reset() local
810 mt76_add_fragment(struct mt76_dev * dev,struct mt76_queue * q,void * data,int len,bool more,u32 info,bool allow_direct) mt76_add_fragment() argument
837 mt76_dma_rx_process(struct mt76_dev * dev,struct mt76_queue * q,int budget) mt76_dma_rx_process() argument
1027 struct mt76_queue *q = &dev->q_rx[i]; mt76_dma_cleanup() local
[all...]
/linux/drivers/spi/
spi-fsl-qspi.c
277 static inline int needs_swap_endian(struct fsl_qspi *q) in needs_swap_endian() argument
279 return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN; in needs_swap_endian()
282 static inline int needs_4x_clock(struct fsl_qspi *q) in needs_4x_clock() argument
284 return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK; in needs_4x_clock()
287 static inline int needs_fill_txfifo(struct fsl_qspi *q) in needs_fill_txfifo() argument
289 return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890; in needs_fill_txfifo()
292 static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) in needs_wakeup_wait_mode() argument
294 return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618; in needs_wakeup_wait_mode()
297 static inline int needs_amba_base_offset(struct fsl_qspi *q) in needs_amba_base_offset() argument
299 return !(q in needs_amba_base_offset()
302 needs_tdh_setting(struct fsl_qspi * q) needs_tdh_setting() argument
311 fsl_qspi_endian_xchg(struct fsl_qspi * q,u32 a) fsl_qspi_endian_xchg() argument
323 qspi_writel(struct fsl_qspi * q,u32 val,void __iomem * addr) qspi_writel() argument
331 qspi_readl(struct fsl_qspi * q,void __iomem * addr) qspi_readl() argument
341 struct fsl_qspi *q = dev_id; fsl_qspi_irq_handler() local
355 fsl_qspi_check_buswidth(struct fsl_qspi * q,u8 width) fsl_qspi_check_buswidth() argument
370 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_supports_op() local
415 fsl_qspi_prepare_lut(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_prepare_lut() argument
471 fsl_qspi_clk_prep_enable(struct fsl_qspi * q) fsl_qspi_clk_prep_enable() argument
491 fsl_qspi_clk_disable_unprep(struct fsl_qspi * q) fsl_qspi_clk_disable_unprep() argument
507 fsl_qspi_invalidate(struct fsl_qspi * q) fsl_qspi_invalidate() argument
525 fsl_qspi_select_mem(struct fsl_qspi * q,struct spi_device * spi,const struct spi_mem_op * op) fsl_qspi_select_mem() argument
552 fsl_qspi_read_ahb(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_ahb() argument
559 fsl_qspi_fill_txfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_fill_txfifo() argument
584 fsl_qspi_read_rxfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_rxfifo() argument
605 fsl_qspi_do_op(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_do_op() argument
630 fsl_qspi_readl_poll_tout(struct fsl_qspi * q,void __iomem * base,u32 mask,u32 delay_us,u32 timeout_us) fsl_qspi_readl_poll_tout() argument
644 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_exec_op() local
706 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_adjust_op_size() local
721 fsl_qspi_default_setup(struct fsl_qspi * q) fsl_qspi_default_setup() argument
812 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_get_name() local
849 struct fsl_qspi *q = data; fsl_qspi_disable() local
858 struct fsl_qspi *q = data; fsl_qspi_cleanup() local
871 struct fsl_qspi *q; fsl_qspi_probe() local
965 struct fsl_qspi *q = dev_get_drvdata(dev); fsl_qspi_resume() local
[all...]
/linux/drivers/net/wireless/broadcom/b43/
pio.c
24 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
37 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
49 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
54 q = pio->tx_queue_AC_BK; in parse_cookie()
57 q = pio->tx_queue_AC_BE; in parse_cookie()
60 q = pio->tx_queue_AC_VI; in parse_cookie()
63 q = pio->tx_queue_AC_VO; in parse_cookie()
66 q = pio->tx_queue_mcast; in parse_cookie()
69 if (B43_WARN_ON(!q)) in parse_cookie()
72 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q in parse_cookie()
126 struct b43_pio_txqueue *q; b43_setup_pioqueue_tx() local
162 struct b43_pio_rxqueue *q; b43_setup_pioqueue_rx() local
178 b43_pio_cancel_tx_packets(struct b43_pio_txqueue * q) b43_pio_cancel_tx_packets() argument
192 b43_destroy_pioqueue_tx(struct b43_pio_txqueue * q,const char * name) b43_destroy_pioqueue_tx() argument
201 b43_destroy_pioqueue_rx(struct b43_pio_rxqueue * q,const char * name) b43_destroy_pioqueue_rx() argument
290 struct b43_pio_txqueue *q; select_queue_by_priority() local
317 tx_write_2byte_queue(struct b43_pio_txqueue * q,u16 ctl,const void * _data,unsigned int data_len) tx_write_2byte_queue() argument
352 struct b43_pio_txqueue *q = pack->queue; pio_tx_frame_2byte_queue() local
370 tx_write_4byte_queue(struct b43_pio_txqueue * q,u32 ctl,const void * _data,unsigned int data_len) tx_write_4byte_queue() argument
422 struct b43_pio_txqueue *q = pack->queue; pio_tx_frame_4byte_queue() local
440 pio_tx_frame(struct b43_pio_txqueue * q,struct sk_buff * skb) pio_tx_frame() argument
491 struct b43_pio_txqueue *q; b43_pio_tx() local
566 struct b43_pio_txqueue *q; b43_pio_handle_txstatus() local
596 pio_rx_frame(struct b43_pio_rxqueue * q) pio_rx_frame() argument
762 b43_pio_rx(struct b43_pio_rxqueue * q) b43_pio_rx() argument
777 b43_pio_tx_suspend_queue(struct b43_pio_txqueue * q) b43_pio_tx_suspend_queue() argument
790 b43_pio_tx_resume_queue(struct b43_pio_txqueue * q) b43_pio_tx_resume_queue() argument
[all...]
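
generate_cookie()/parse_cookie() above pack the queue identity into the top nibble of a 16-bit cookie (queue index + 1, so zero never appears) and the packet slot into the low 12 bits, letting a TX-status report be routed back to its queue. The encode/decode pair as a standalone model of that layout:

#include <stdint.h>

/* Cookie layout (model): bits 15..12 = queue index + 1, bits 11..0 = slot. */
static uint16_t generate_cookie(unsigned int q_index, unsigned int pkt_index)
{
    return (uint16_t)(((q_index + 1) << 12) | (pkt_index & 0x0fff));
}

static void parse_cookie(uint16_t cookie,
                         unsigned int *q_index, unsigned int *pkt_index)
{
    *q_index = (cookie >> 12) - 1;
    *pkt_index = cookie & 0x0fff;
}
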
/linux/drivers/s390/cio/
H A Dqdio_main.c107 * @q: queue to manipulate
116 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
119 int tmp_count = count, tmp_start = start, nr = q->nr; in qdio_do_eqbs()
122 qperf_inc(q, eqbs); in qdio_do_eqbs()
124 if (!q->is_input_q) in qdio_do_eqbs()
125 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
127 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
137 qperf_inc(q, eqbs_partial); in qdio_do_eqbs()
138 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", in qdio_do_eqbs()
143 DBF_DEV_EVENT(DBF_WARN, q in qdio_do_eqbs()
166 qdio_do_sqbs(struct qdio_q * q,unsigned char state,int start,int count) qdio_do_sqbs() argument
205 get_buf_states(struct qdio_q * q,unsigned int bufnr,unsigned char * state,unsigned int count,int auto_ack) get_buf_states() argument
235 get_buf_state(struct qdio_q * q,unsigned int bufnr,unsigned char * state,int auto_ack) get_buf_state() argument
242 set_buf_states(struct qdio_q * q,int bufnr,unsigned char state,int count) set_buf_states() argument
264 set_buf_state(struct qdio_q * q,int bufnr,unsigned char state) set_buf_state() argument
273 struct qdio_q *q; qdio_init_buf_states() local
284 qdio_siga_sync(struct qdio_q * q,unsigned int output,unsigned int input) qdio_siga_sync() argument
305 qdio_sync_input_queue(struct qdio_q * q) qdio_sync_input_queue() argument
310 qdio_sync_output_queue(struct qdio_q * q) qdio_sync_output_queue() argument
315 qdio_siga_sync_q(struct qdio_q * q) qdio_siga_sync_q() argument
323 qdio_siga_output(struct qdio_q * q,unsigned int count,unsigned int * busy_bit,dma64_t aob) qdio_siga_output() argument
364 qdio_siga_input(struct qdio_q * q) qdio_siga_input() argument
384 debug_get_buf_state(struct qdio_q * q,unsigned int bufnr,unsigned char * state) debug_get_buf_state() argument
392 qdio_stop_polling(struct qdio_q * q) qdio_stop_polling() argument
405 account_sbals(struct qdio_q * q,unsigned int count) account_sbals() argument
411 process_buffer_error(struct qdio_q * q,unsigned int start,int count) process_buffer_error() argument
430 inbound_handle_work(struct qdio_q * q,unsigned int start,int count,bool auto_ack) inbound_handle_work() argument
442 get_inbound_buffer_frontier(struct qdio_q * q,unsigned int start,unsigned int * error) get_inbound_buffer_frontier() argument
506 struct qdio_q *q; qdio_inspect_input_queue() local
526 qdio_inbound_q_done(struct qdio_q * q,unsigned int start) qdio_inbound_q_done() argument
544 get_outbound_buffer_frontier(struct qdio_q * q,unsigned int start,unsigned int * error) get_outbound_buffer_frontier() argument
610 struct qdio_q *q; qdio_inspect_output_queue() local
630 qdio_kick_outbound_q(struct qdio_q * q,unsigned int count,dma64_t aob) qdio_kick_outbound_q() argument
1210 handle_inbound(struct qdio_q * q,int bufnr,int count) handle_inbound() argument
1269 handle_outbound(struct qdio_q * q,unsigned int bufnr,unsigned int count,struct qaob * aob) handle_outbound() argument
1343 struct qdio_q *q; qdio_start_irq() local
[all...]
/linux/drivers/infiniband/sw/rxe/
rxe_queue.c
46 inline void rxe_queue_reset(struct rxe_queue *q) in rxe_queue_reset() argument
52 memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); in rxe_queue_reset()
58 struct rxe_queue *q; in rxe_queue_init() local
66 q = kzalloc(sizeof(*q), GFP_KERNEL); in rxe_queue_init()
67 if (!q) in rxe_queue_init()
70 q->rxe = rxe; in rxe_queue_init()
71 q->type = type; in rxe_queue_init()
74 q in rxe_queue_init()
110 resize_finish(struct rxe_queue * q,struct rxe_queue * new_q,unsigned int num_elem) resize_finish() argument
147 rxe_queue_resize(struct rxe_queue * q,unsigned int * num_elem_p,unsigned int elem_size,struct ib_udata * udata,struct mminfo __user * outbuf,spinlock_t * producer_lock,spinlock_t * consumer_lock) rxe_queue_resize() argument
193 rxe_queue_cleanup(struct rxe_queue * q) rxe_queue_cleanup() argument
[all...]
rxe_queue.h
18 * - The driver indices are always masked off to q->index_mask
26 * - By passing the type in the parameter list separate from q
83 void rxe_queue_reset(struct rxe_queue *q);
88 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
95 static inline u32 queue_next_index(struct rxe_queue *q, int index) in queue_next_index() argument
97 return (index + 1) & q->index_mask; in queue_next_index()
100 static inline u32 queue_get_producer(const struct rxe_queue *q, in queue_get_producer() argument
108 prod = smp_load_acquire(&q->buf->producer_index); in queue_get_producer()
112 prod = q->index; in queue_get_producer()
116 prod = q in queue_get_producer()
127 queue_get_consumer(const struct rxe_queue * q,enum queue_type type) queue_get_consumer() argument
154 queue_empty(struct rxe_queue * q,enum queue_type type) queue_empty() argument
162 queue_full(struct rxe_queue * q,enum queue_type type) queue_full() argument
170 queue_count(const struct rxe_queue * q,enum queue_type type) queue_count() argument
179 queue_advance_producer(struct rxe_queue * q,enum queue_type type) queue_advance_producer() argument
215 queue_advance_consumer(struct rxe_queue * q,enum queue_type type) queue_advance_consumer() argument
250 queue_producer_addr(struct rxe_queue * q,enum queue_type type) queue_producer_addr() argument
258 queue_consumer_addr(struct rxe_queue * q,enum queue_type type) queue_consumer_addr() argument
266 queue_addr_from_index(struct rxe_queue * q,u32 index) queue_addr_from_index() argument
272 queue_index_from_addr(const struct rxe_queue * q,const void * addr) queue_index_from_addr() argument
279 queue_head(struct rxe_queue * q,enum queue_type type) queue_head() argument
[all...]
/linux/block/
blk-pm.c
10 * @q: the queue of the device
14 * Initialize runtime-PM-related fields for @q and start auto suspend for
17 * request queue @q has been allocated, and runtime PM for it can not happen
29 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
31 q->dev = dev; in blk_pm_runtime_init()
32 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
33 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
34 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
40 * @q: the queue of the device
59 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
120 blk_post_runtime_suspend(struct request_queue * q,int err) blk_post_runtime_suspend() argument
150 blk_pre_runtime_resume(struct request_queue * q) blk_pre_runtime_resume() argument
174 blk_post_runtime_resume(struct request_queue * q) blk_post_runtime_resume() argument
[all...]
