
Searched defs:q (Results 1 – 25 of 1015) sorted by relevance

/linux/drivers/gpu/drm/xe/
xe_guc_submit.c
49 exec_queue_to_guc(struct xe_exec_queue *q) in exec_queue_to_guc() argument
72 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
77 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
82 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
87 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
92 set_exec_queue_enabled(struct xe_exec_queue * q) set_exec_queue_enabled() argument
97 clear_exec_queue_enabled(struct xe_exec_queue * q) clear_exec_queue_enabled() argument
102 exec_queue_pending_enable(struct xe_exec_queue * q) exec_queue_pending_enable() argument
107 set_exec_queue_pending_enable(struct xe_exec_queue * q) set_exec_queue_pending_enable() argument
112 clear_exec_queue_pending_enable(struct xe_exec_queue * q) clear_exec_queue_pending_enable() argument
117 exec_queue_pending_disable(struct xe_exec_queue * q) exec_queue_pending_disable() argument
122 set_exec_queue_pending_disable(struct xe_exec_queue * q) set_exec_queue_pending_disable() argument
127 clear_exec_queue_pending_disable(struct xe_exec_queue * q) clear_exec_queue_pending_disable() argument
132 exec_queue_destroyed(struct xe_exec_queue * q) exec_queue_destroyed() argument
137 set_exec_queue_destroyed(struct xe_exec_queue * q) set_exec_queue_destroyed() argument
142 exec_queue_banned(struct xe_exec_queue * q) exec_queue_banned() argument
147 set_exec_queue_banned(struct xe_exec_queue * q) set_exec_queue_banned() argument
152 exec_queue_suspended(struct xe_exec_queue * q) exec_queue_suspended() argument
157 set_exec_queue_suspended(struct xe_exec_queue * q) set_exec_queue_suspended() argument
162 clear_exec_queue_suspended(struct xe_exec_queue * q) clear_exec_queue_suspended() argument
167 exec_queue_reset(struct xe_exec_queue * q) exec_queue_reset() argument
172 set_exec_queue_reset(struct xe_exec_queue * q) set_exec_queue_reset() argument
177 exec_queue_killed(struct xe_exec_queue * q) exec_queue_killed() argument
182 set_exec_queue_killed(struct xe_exec_queue * q) set_exec_queue_killed() argument
187 exec_queue_wedged(struct xe_exec_queue * q) exec_queue_wedged() argument
192 set_exec_queue_wedged(struct xe_exec_queue * q) set_exec_queue_wedged() argument
197 exec_queue_check_timeout(struct xe_exec_queue * q) exec_queue_check_timeout() argument
202 set_exec_queue_check_timeout(struct xe_exec_queue * q) set_exec_queue_check_timeout() argument
207 clear_exec_queue_check_timeout(struct xe_exec_queue * q) clear_exec_queue_check_timeout() argument
212 exec_queue_extra_ref(struct xe_exec_queue * q) exec_queue_extra_ref() argument
217 set_exec_queue_extra_ref(struct xe_exec_queue * q) set_exec_queue_extra_ref() argument
222 exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue * q) exec_queue_killed_or_banned_or_wedged() argument
250 struct xe_exec_queue *q; guc_submit_wedged_fini() local
319 __release_guc_id(struct xe_guc * guc,struct xe_exec_queue * q,u32 xa_count) __release_guc_id() argument
335 alloc_guc_id(struct xe_guc * guc,struct xe_exec_queue * q) alloc_guc_id() argument
371 release_guc_id(struct xe_guc * guc,struct xe_exec_queue * q) release_guc_id() argument
427 init_policies(struct xe_guc * guc,struct xe_exec_queue * q) init_policies() argument
451 set_min_preemption_timeout(struct xe_guc * guc,struct xe_exec_queue * q) set_min_preemption_timeout() argument
470 __register_mlrc_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q,struct guc_ctxt_registration_info * info) __register_mlrc_exec_queue() argument
545 register_exec_queue(struct xe_exec_queue * q) register_exec_queue() argument
599 wq_space_until_wrap(struct xe_exec_queue * q) wq_space_until_wrap() argument
604 wq_wait_for_space(struct xe_exec_queue * q,u32 wqi_size) wq_wait_for_space() argument
632 wq_noop_append(struct xe_exec_queue * q) wq_noop_append() argument
652 wq_item_append(struct xe_exec_queue * q) wq_item_append() argument
697 submit_exec_queue(struct xe_exec_queue * q) submit_exec_queue() argument
752 struct xe_exec_queue *q = job->q; guc_exec_queue_run_job() local
793 MAKE_SCHED_CONTEXT_ACTION(q,enable_disable) global() argument
801 disable_scheduling_deregister(struct xe_guc * guc,struct xe_exec_queue * q) disable_scheduling_deregister() argument
837 xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue * q) xe_guc_exec_queue_trigger_cleanup() argument
861 struct xe_exec_queue *q; xe_guc_submit_wedge() local
908 struct xe_exec_queue *q = ge->q; xe_guc_exec_queue_lr_cleanup() local
966 check_timeout(struct xe_exec_queue * q,struct xe_sched_job * job) check_timeout() argument
1007 enable_scheduling(struct xe_exec_queue * q) enable_scheduling() argument
1036 disable_scheduling(struct xe_exec_queue * q,bool immediate) disable_scheduling() argument
1055 __deregister_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q) __deregister_exec_queue() argument
1079 struct xe_exec_queue *q = job->q; guc_exec_queue_timedout_job() local
1284 struct xe_exec_queue *q = ge->q; __guc_exec_queue_fini_async() local
1307 guc_exec_queue_fini_async(struct xe_exec_queue * q) guc_exec_queue_fini_async() argument
1321 __guc_exec_queue_fini(struct xe_guc * guc,struct xe_exec_queue * q) __guc_exec_queue_fini() argument
1335 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_cleanup() local
1347 guc_exec_queue_allowed_to_change_state(struct xe_exec_queue * q) guc_exec_queue_allowed_to_change_state() argument
1354 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_set_sched_props() local
1362 __suspend_fence_signal(struct xe_exec_queue * q) __suspend_fence_signal() argument
1371 suspend_fence_signal(struct xe_exec_queue * q) suspend_fence_signal() argument
1384 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_suspend() local
1413 struct xe_exec_queue *q = msg->private_data; __guc_exec_queue_process_msg_resume() local
1469 guc_exec_queue_init(struct xe_exec_queue * q) guc_exec_queue_init() argument
1538 guc_exec_queue_kill(struct xe_exec_queue * q) guc_exec_queue_kill() argument
1546 guc_exec_queue_add_msg(struct xe_exec_queue * q,struct xe_sched_msg * msg,u32 opcode) guc_exec_queue_add_msg() argument
1562 guc_exec_queue_try_add_msg(struct xe_exec_queue * q,struct xe_sched_msg * msg,u32 opcode) guc_exec_queue_try_add_msg() argument
1577 guc_exec_queue_fini(struct xe_exec_queue * q) guc_exec_queue_fini() argument
1587 guc_exec_queue_set_priority(struct xe_exec_queue * q,enum xe_exec_queue_priority priority) guc_exec_queue_set_priority() argument
1606 guc_exec_queue_set_timeslice(struct xe_exec_queue * q,u32 timeslice_us) guc_exec_queue_set_timeslice() argument
1624 guc_exec_queue_set_preempt_timeout(struct xe_exec_queue * q,u32 preempt_timeout_us) guc_exec_queue_set_preempt_timeout() argument
1643 guc_exec_queue_suspend(struct xe_exec_queue * q) guc_exec_queue_suspend() argument
1659 guc_exec_queue_suspend_wait(struct xe_exec_queue * q) guc_exec_queue_suspend_wait() argument
1686 guc_exec_queue_resume(struct xe_exec_queue * q) guc_exec_queue_resume() argument
1699 guc_exec_queue_reset_status(struct xe_exec_queue * q) guc_exec_queue_reset_status() argument
1723 guc_exec_queue_stop(struct xe_guc * guc,struct xe_exec_queue * q) guc_exec_queue_stop() argument
1805 struct xe_exec_queue *q; xe_guc_submit_stop() local
1829 guc_exec_queue_start(struct xe_exec_queue * q) guc_exec_queue_start() argument
1848 struct xe_exec_queue *q; xe_guc_submit_start() local
1873 struct xe_exec_queue *q; g2h_exec_queue_lookup() local
1892 deregister_exec_queue(struct xe_guc * guc,struct xe_exec_queue * q) deregister_exec_queue() argument
1909 handle_sched_done(struct xe_guc * guc,struct xe_exec_queue * q,u32 runnable_state) handle_sched_done() argument
1956 struct xe_exec_queue *q; xe_guc_sched_done_handler() local
1983 handle_deregister_done(struct xe_guc * guc,struct xe_exec_queue * q) handle_deregister_done() argument
1997 struct xe_exec_queue *q; xe_guc_deregister_done_handler() local
2025 struct xe_exec_queue *q; xe_guc_exec_queue_reset_handler() local
2087 struct xe_exec_queue *q; xe_guc_exec_queue_memory_cat_error_handler() local
2159 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue * q,struct xe_guc_submit_exec_queue_snapshot * snapshot) guc_exec_queue_wq_snapshot_capture() argument
2217 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue * q) xe_guc_exec_queue_snapshot_capture() argument
2371 guc_exec_queue_print(struct xe_exec_queue * q,struct drm_printer * p) guc_exec_queue_print() argument
2389 struct xe_exec_queue *q; xe_guc_submit_print() local
[all...]
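The long run of exec_queue_*()/set_exec_queue_*()/clear_exec_queue_*() helpers above is the usual pattern of tiny accessors that test, set, and clear per-queue state flags. As a compact, standalone sketch of that accessor style (hypothetical names and a plain C11 atomic flags word, not the xe driver's actual state field or locking):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bits for a submission queue's lifecycle state. */
enum {
	QUEUE_FLAG_REGISTERED = 1u << 0,
	QUEUE_FLAG_ENABLED    = 1u << 1,
	QUEUE_FLAG_BANNED     = 1u << 2,
};

struct demo_queue {
	atomic_uint flags;	/* stand-in for the driver's state word */
};

/* Each state gets a test/set/clear trio, mirroring the accessor style
 * seen in the listing above. */
static bool queue_registered(struct demo_queue *q)
{
	return atomic_load(&q->flags) & QUEUE_FLAG_REGISTERED;
}

static void set_queue_registered(struct demo_queue *q)
{
	atomic_fetch_or(&q->flags, QUEUE_FLAG_REGISTERED);
}

static void clear_queue_registered(struct demo_queue *q)
{
	atomic_fetch_and(&q->flags, ~QUEUE_FLAG_REGISTERED);
}

static bool queue_banned(struct demo_queue *q)
{
	return atomic_load(&q->flags) & QUEUE_FLAG_BANNED;
}

static void set_queue_banned(struct demo_queue *q)
{
	atomic_fetch_or(&q->flags, QUEUE_FLAG_BANNED);
}

int main(void)
{
	struct demo_queue q = { .flags = 0 };

	set_queue_registered(&q);
	set_queue_banned(&q);
	clear_queue_registered(&q);

	printf("registered=%d banned=%d\n",
	       queue_registered(&q), queue_banned(&q));
	return 0;
}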
xe_exec_queue.c
40 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
59 struct xe_exec_queue *q; __xe_exec_queue_alloc() local
115 __xe_exec_queue_init(struct xe_exec_queue * q) __xe_exec_queue_init() argument
159 struct xe_exec_queue *q; xe_exec_queue_create() local
242 struct xe_exec_queue *q; xe_exec_queue_create_bind() local
273 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); xe_exec_queue_destroy() local
289 xe_exec_queue_fini(struct xe_exec_queue * q) xe_exec_queue_fini() argument
307 xe_exec_queue_assign_name(struct xe_exec_queue * q,u32 instance) xe_exec_queue_assign_name() argument
335 struct xe_exec_queue *q; xe_exec_queue_lookup() local
353 exec_queue_set_priority(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_priority() argument
418 exec_queue_set_timeslice(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_timeslice() argument
435 exec_queue_set_pxp_type(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_pxp_type() argument
461 exec_queue_user_ext_set_property(struct xe_device * xe,struct xe_exec_queue * q,u64 extension) exec_queue_user_ext_set_property() argument
497 exec_queue_user_extensions(struct xe_device * xe,struct xe_exec_queue * q,u64 extensions,int ext_number) exec_queue_user_extensions() argument
593 struct xe_exec_queue *q = NULL; xe_exec_queue_create_ioctl() local
720 struct xe_exec_queue *q; xe_exec_queue_get_property_ioctl() local
750 xe_exec_queue_is_lr(struct xe_exec_queue * q) xe_exec_queue_is_lr() argument
756 xe_exec_queue_num_job_inflight(struct xe_exec_queue * q) xe_exec_queue_num_job_inflight() argument
767 xe_exec_queue_ring_full(struct xe_exec_queue * q) xe_exec_queue_ring_full() argument
789 xe_exec_queue_is_idle(struct xe_exec_queue * q) xe_exec_queue_is_idle() argument
815 xe_exec_queue_update_run_ticks(struct xe_exec_queue * q) xe_exec_queue_update_run_ticks() argument
856 xe_exec_queue_kill(struct xe_exec_queue * q) xe_exec_queue_kill() argument
876 struct xe_exec_queue *q; xe_exec_queue_destroy_ioctl() local
902 xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_lockdep_assert() argument
918 xe_exec_queue_last_fence_put(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_put() argument
931 xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue * q) xe_exec_queue_last_fence_put_unlocked() argument
948 xe_exec_queue_last_fence_get(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_get() argument
975 xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_get_for_resume() argument
1000 xe_exec_queue_last_fence_set(struct xe_exec_queue * q,struct xe_vm * vm,struct dma_fence * fence) xe_exec_queue_last_fence_set() argument
1017 xe_exec_queue_last_fence_test_dep(struct xe_exec_queue * q,struct xe_vm * vm) xe_exec_queue_last_fence_test_dep() argument
[all...]
/linux/net/xdp/
xsk_queue.h
120 static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) in __xskq_cons_read_addr_unchecked() argument
128 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_read_addr_unchecked() argument
192 static inline bool xskq_has_descs(struct xsk_queue *q) in xskq_has_descs() argument
197 xskq_cons_is_valid_desc(struct xsk_queue * q,struct xdp_desc * d,struct xsk_buff_pool * pool) xskq_cons_is_valid_desc() argument
208 xskq_cons_read_desc(struct xsk_queue * q,struct xdp_desc * desc,struct xsk_buff_pool * pool) xskq_cons_read_desc() argument
224 xskq_cons_release_n(struct xsk_queue * q,u32 cnt) xskq_cons_release_n() argument
229 parse_desc(struct xsk_queue * q,struct xsk_buff_pool * pool,struct xdp_desc * desc,struct parsed_desc * parsed) parse_desc() argument
237 xskq_cons_read_desc_batch(struct xsk_queue * q,struct xsk_buff_pool * pool,u32 max) xskq_cons_read_desc_batch() argument
279 __xskq_cons_release(struct xsk_queue * q) __xskq_cons_release() argument
284 __xskq_cons_peek(struct xsk_queue * q) __xskq_cons_peek() argument
290 xskq_cons_get_entries(struct xsk_queue * q) xskq_cons_get_entries() argument
296 xskq_cons_nb_entries(struct xsk_queue * q,u32 max) xskq_cons_nb_entries() argument
309 xskq_cons_peek_addr_unchecked(struct xsk_queue * q,u64 * addr) xskq_cons_peek_addr_unchecked() argument
316 xskq_cons_peek_desc(struct xsk_queue * q,struct xdp_desc * desc,struct xsk_buff_pool * pool) xskq_cons_peek_desc() argument
329 xskq_cons_release(struct xsk_queue * q) xskq_cons_release() argument
334 xskq_cons_cancel_n(struct xsk_queue * q,u32 cnt) xskq_cons_cancel_n() argument
339 xskq_cons_present_entries(struct xsk_queue * q) xskq_cons_present_entries() argument
347 xskq_prod_nb_free(struct xsk_queue * q,u32 max) xskq_prod_nb_free() argument
361 xskq_prod_is_full(struct xsk_queue * q) xskq_prod_is_full() argument
366 xskq_prod_cancel_n(struct xsk_queue * q,u32 cnt) xskq_prod_cancel_n() argument
371 xskq_prod_reserve(struct xsk_queue * q) xskq_prod_reserve() argument
381 xskq_prod_reserve_addr(struct xsk_queue * q,u64 addr) xskq_prod_reserve_addr() argument
393 xskq_prod_write_addr_batch(struct xsk_queue * q,struct xdp_desc * descs,u32 nb_entries) xskq_prod_write_addr_batch() argument
406 xskq_prod_reserve_desc(struct xsk_queue * q,u64 addr,u32 len,u32 flags) xskq_prod_reserve_desc() argument
424 __xskq_prod_submit(struct xsk_queue * q,u32 idx) __xskq_prod_submit() argument
429 xskq_prod_submit(struct xsk_queue * q) xskq_prod_submit() argument
434 xskq_prod_submit_n(struct xsk_queue * q,u32 nb_entries) xskq_prod_submit_n() argument
439 xskq_prod_is_empty(struct xsk_queue * q) xskq_prod_is_empty() argument
447 xskq_nb_invalid_descs(struct xsk_queue * q) xskq_nb_invalid_descs() argument
452 xskq_nb_queue_empty_descs(struct xsk_queue * q) xskq_nb_queue_empty_descs() argument
[all...]
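The xskq_* helpers listed above all manipulate a single-producer/single-consumer ring addressed by free-running 32-bit producer/consumer counters, with each side keeping a cached snapshot of the peer's counter. The standalone C sketch below illustrates only that index bookkeeping; the struct and function names are hypothetical, it is single-threaded (the kernel code additionally uses memory barriers and READ_ONCE/WRITE_ONCE), and it is not the xsk_queue API itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1u)

/* Hypothetical ring bookkeeping: free-running producer/consumer counters
 * plus a cached copy of the peer's counter on each side. */
struct demo_ring {
	uint32_t producer;		/* advanced by the producer side   */
	uint32_t consumer;		/* advanced by the consumer side   */
	uint32_t cached_prod;		/* consumer's snapshot of producer */
	uint32_t cached_cons;		/* producer's snapshot of consumer */
	uint64_t ring[RING_SIZE];	/* descriptor/address slots        */
};

/* Producer: reserve one slot if there is room, write it, then submit. */
static bool demo_prod_reserve(struct demo_ring *q, uint64_t addr)
{
	uint32_t free_entries = RING_SIZE - (q->producer - q->cached_cons);

	if (free_entries == 0) {
		/* Refresh the snapshot of the consumer index and retry once. */
		q->cached_cons = q->consumer;
		free_entries = RING_SIZE - (q->producer - q->cached_cons);
		if (free_entries == 0)
			return false;	/* ring full */
	}
	q->ring[q->producer & RING_MASK] = addr;
	q->producer++;			/* "submit": make the entry visible */
	return true;
}

/* Consumer: peek one entry if available, then release the slot. */
static bool demo_cons_peek(struct demo_ring *q, uint64_t *addr)
{
	if (q->cached_prod == q->consumer)
		q->cached_prod = q->producer;	/* refresh snapshot */
	if (q->cached_prod == q->consumer)
		return false;			/* ring empty */

	*addr = q->ring[q->consumer & RING_MASK];
	q->consumer++;				/* "release" the slot */
	return true;
}

int main(void)
{
	struct demo_ring q = { 0 };
	uint64_t v;

	for (uint64_t i = 0; i < 10; i++) {
		if (!demo_prod_reserve(&q, i))
			printf("full at %llu\n", (unsigned long long)i);
	}
	while (demo_cons_peek(&q, &v))
		printf("got %llu\n", (unsigned long long)v);
	return 0;
}

Broadly, this cached-index trick is what lets peek/release and reserve/submit style helpers avoid re-reading the shared producer and consumer counters on every call.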
/linux/sound/core/seq/
seq_queue.c
50 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
68 struct snd_seq_queue *q; in queue_list_remove() local
90 struct snd_seq_queue *q; queue_new() local
121 queue_delete(struct snd_seq_queue * q) queue_delete() argument
162 struct snd_seq_queue *q; snd_seq_queue_alloc() local
181 struct snd_seq_queue *q; snd_seq_queue_delete() local
197 struct snd_seq_queue *q; queueptr() local
212 struct snd_seq_queue *q; snd_seq_queue_find_name() local
230 snd_seq_check_queue(struct snd_seq_queue * q,int atomic,int hop) snd_seq_check_queue() argument
289 struct snd_seq_queue *q; snd_seq_enqueue_event() local
340 check_access(struct snd_seq_queue * q,int client) check_access() argument
348 queue_access_lock(struct snd_seq_queue * q,int client) queue_access_lock() argument
360 queue_access_unlock(struct snd_seq_queue * q) queue_access_unlock() argument
369 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_check_access() local
387 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_set_owner() local
453 struct snd_seq_queue *q = queueptr(queueid); snd_seq_queue_timer_set_tempo() local
517 struct snd_seq_queue *q; snd_seq_queue_is_used() local
538 struct snd_seq_queue *q; snd_seq_queue_client_leave() local
571 struct snd_seq_queue *q; snd_seq_queue_remove_cells() local
592 queue_broadcast_event(struct snd_seq_queue * q,struct snd_seq_event * ev,int atomic,int hop) queue_broadcast_event() argument
615 snd_seq_queue_process_event(struct snd_seq_queue * q,struct snd_seq_event * ev,int atomic,int hop) snd_seq_queue_process_event() argument
670 struct snd_seq_queue *q; snd_seq_control_queue() local
700 struct snd_seq_queue *q; snd_seq_info_queues_read() local
[all...]
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
47 #define dprintk(q, level, fmt, arg...) \ argument
103 #define log_qop(q, op) \ argument
107 #define call_qop(q, op, args...) \ argument
118 call_void_qop(q,op,args...) global() argument
167 call_qop(q,op,args...) global() argument
170 call_void_qop(q,op,args...) global() argument
187 call_bufop(q,op,args...) global() argument
195 call_void_bufop(q,op,args...) global() argument
225 struct vb2_queue *q = vb->vb2_queue; __vb2_buf_mem_alloc() local
380 struct vb2_queue *q = vb->vb2_queue; __setup_offsets() local
408 init_buffer_cache_hints(struct vb2_queue * q,struct vb2_buffer * vb) init_buffer_cache_hints() argument
435 vb2_queue_add_buffer(struct vb2_queue * q,struct vb2_buffer * vb,unsigned int index) vb2_queue_add_buffer() argument
465 __vb2_queue_alloc(struct vb2_queue * q,enum vb2_memory memory,unsigned int num_buffers,unsigned int num_planes,const unsigned int plane_sizes[VB2_MAX_PLANES],unsigned int * first_index) __vb2_queue_alloc() argument
559 __vb2_free_mem(struct vb2_queue * q,unsigned int start,unsigned int count) __vb2_free_mem() argument
584 __vb2_queue_free(struct vb2_queue * q,unsigned int start,unsigned int count) __vb2_queue_free() argument
700 vb2_buffer_in_use(struct vb2_queue * q,struct vb2_buffer * vb) vb2_buffer_in_use() argument
722 __buffers_in_use(struct vb2_queue * q) __buffers_in_use() argument
737 vb2_core_querybuf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb) vb2_core_querybuf() argument
747 __verify_userptr_ops(struct vb2_queue * q) __verify_userptr_ops() argument
760 __verify_mmap_ops(struct vb2_queue * q) __verify_mmap_ops() argument
773 __verify_dmabuf_ops(struct vb2_queue * q) __verify_dmabuf_ops() argument
783 vb2_verify_memory_type(struct vb2_queue * q,enum vb2_memory memory,unsigned int type) vb2_verify_memory_type() argument
829 set_queue_coherency(struct vb2_queue * q,bool non_coherent_mem) set_queue_coherency() argument
838 verify_coherency_flags(struct vb2_queue * q,bool non_coherent_mem) verify_coherency_flags() argument
847 vb2_core_allocated_buffers_storage(struct vb2_queue * q) vb2_core_allocated_buffers_storage() argument
865 vb2_core_free_buffers_storage(struct vb2_queue * q) vb2_core_free_buffers_storage() argument
873 vb2_core_reqbufs(struct vb2_queue * q,enum vb2_memory memory,unsigned int flags,unsigned int * count) vb2_core_reqbufs() argument
1038 vb2_core_create_bufs(struct vb2_queue * q,enum vb2_memory memory,unsigned int flags,unsigned int * count,unsigned int requested_planes,const unsigned int requested_sizes[],unsigned int * first_index) vb2_core_create_bufs() argument
1184 struct vb2_queue *q = vb->vb2_queue; vb2_buffer_done() local
1238 vb2_discard_done(struct vb2_queue * q) vb2_discard_done() argument
1268 struct vb2_queue *q = vb->vb2_queue; __prepare_userptr() local
1384 struct vb2_queue *q = vb->vb2_queue; __prepare_dmabuf() local
1539 struct vb2_queue *q = vb->vb2_queue; __enqueue_in_driver() local
1551 struct vb2_queue *q = vb->vb2_queue; __buf_prepare() local
1699 vb2_core_prepare_buf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb) vb2_core_prepare_buf() argument
1726 vb2_core_remove_bufs(struct vb2_queue * q,unsigned int start,unsigned int count) vb2_core_remove_bufs() argument
1775 vb2_start_streaming(struct vb2_queue * q) vb2_start_streaming() argument
1831 vb2_core_qbuf(struct vb2_queue * q,struct vb2_buffer * vb,void * pb,struct media_request * req) vb2_core_qbuf() argument
1988 __vb2_wait_for_done_vb(struct vb2_queue * q,int nonblocking) __vb2_wait_for_done_vb() argument
2076 __vb2_get_done_vb(struct vb2_queue * q,struct vb2_buffer ** vb,void * pb,int nonblocking) __vb2_get_done_vb() argument
2109 vb2_wait_for_all_buffers(struct vb2_queue * q) vb2_wait_for_all_buffers() argument
2127 struct vb2_queue *q = vb->vb2_queue; __vb2_dqbuf() local
2138 vb2_core_dqbuf(struct vb2_queue * q,unsigned int * pindex,void * pb,bool nonblocking) vb2_core_dqbuf() argument
2202 __vb2_queue_cancel(struct vb2_queue * q) __vb2_queue_cancel() argument
2313 vb2_core_streamon(struct vb2_queue * q,unsigned int type) vb2_core_streamon() argument
2364 vb2_queue_error(struct vb2_queue * q) vb2_queue_error() argument
2372 vb2_core_streamoff(struct vb2_queue * q,unsigned int type) vb2_core_streamoff() argument
2400 __find_plane_by_offset(struct vb2_queue * q,unsigned long offset,struct vb2_buffer ** vb,unsigned int * plane) __find_plane_by_offset() argument
2434 vb2_core_expbuf(struct vb2_queue * q,int * fd,unsigned int type,struct vb2_buffer * vb,unsigned int plane,unsigned int flags) vb2_core_expbuf() argument
2499 vb2_mmap(struct vb2_queue * q,struct vm_area_struct * vma) vb2_mmap() argument
2569 vb2_get_unmapped_area(struct vb2_queue * q,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags) vb2_get_unmapped_area() argument
2602 vb2_core_queue_init(struct vb2_queue * q) vb2_core_queue_init() argument
2690 vb2_core_queue_release(struct vb2_queue * q) vb2_core_queue_release() argument
2702 vb2_core_poll(struct vb2_queue * q,struct file * file,poll_table * wait) vb2_core_poll() argument
2851 __vb2_init_fileio(struct vb2_queue * q,int read) __vb2_init_fileio() argument
2985 __vb2_cleanup_fileio(struct vb2_queue * q) __vb2_cleanup_fileio() argument
3009 __vb2_perform_fileio(struct vb2_queue * q,char __user * data,size_t count,loff_t * ppos,int nonblock,int read) __vb2_perform_fileio() argument
3173 vb2_read(struct vb2_queue * q,char __user * data,size_t count,loff_t * ppos,int nonblocking) vb2_read() argument
3180 vb2_write(struct vb2_queue * q,const char __user * data,size_t count,loff_t * ppos,int nonblocking) vb2_write() argument
3197 struct vb2_queue *q = data; vb2_thread() local
3275 vb2_thread_start(struct vb2_queue * q,vb2_thread_fnc fnc,void * priv,const char * thread_name) vb2_thread_start() argument
3315 vb2_thread_stop(struct vb2_queue * q) vb2_thread_stop() argument
[all...]
/linux/drivers/net/ethernet/fungible/funeth/
funeth_tx.c
56 static void *txq_end(const struct funeth_txq *q) in txq_end() argument
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
149 write_pkt_desc(struct sk_buff * skb,struct funeth_txq * q,unsigned int tls_len) write_pkt_desc() argument
311 fun_txq_avail(const struct funeth_txq * q) fun_txq_avail() argument
317 fun_tx_check_stop(struct funeth_txq * q) fun_tx_check_stop() argument
338 fun_txq_may_restart(struct funeth_txq * q) fun_txq_may_restart() argument
347 struct funeth_txq *q = fp->txqs[qid]; fun_start_xmit() local
385 txq_hw_head(const struct funeth_txq * q) txq_hw_head() argument
393 fun_unmap_pkt(const struct funeth_txq * q,unsigned int idx) fun_unmap_pkt() argument
423 fun_txq_reclaim(struct funeth_txq * q,int budget) fun_txq_reclaim() argument
471 struct funeth_txq *q = irq->txq; fun_txq_napi_poll() local
484 fun_xdpq_clean(struct funeth_txq * q,unsigned int budget) fun_xdpq_clean() argument
514 fun_xdp_tx(struct funeth_txq * q,struct xdp_frame * xdpf) fun_xdp_tx() argument
575 struct funeth_txq *q, **xdpqs; fun_xdp_xmit_frames() local
601 fun_txq_purge(struct funeth_txq * q) fun_txq_purge() argument
612 fun_xdpq_purge(struct funeth_txq * q) fun_xdpq_purge() argument
629 struct funeth_txq *q; fun_txq_create_sw() local
665 fun_txq_free_sw(struct funeth_txq * q) fun_txq_free_sw() argument
680 fun_txq_create_dev(struct funeth_txq * q,struct fun_irq * irq) fun_txq_create_dev() argument
731 fun_txq_free_dev(struct funeth_txq * q) fun_txq_free_dev() argument
763 struct funeth_txq *q = *qp; funeth_txq_create() local
789 funeth_txq_free(struct funeth_txq * q,int state) funeth_txq_free() argument
[all...]
funeth_rx.c
50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) in cache_offer() argument
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
127 funeth_free_page(struct funeth_rxq * q,struct funeth_rxbuf * rb) funeth_free_page() argument
141 fun_run_xdp(struct funeth_rxq * q,skb_frag_t * frags,void * buf_va,int ref_ok,struct funeth_txq * xdp_q) fun_run_xdp() argument
256 get_buf(struct funeth_rxq * q,struct funeth_rxbuf * buf,unsigned int len) get_buf() argument
296 fun_gather_pkt(struct funeth_rxq * q,unsigned int tot_len,skb_frag_t * frags) fun_gather_pkt() argument
348 advance_cq(struct funeth_rxq * q) advance_cq() argument
365 fun_handle_cqe_pkt(struct funeth_rxq * q,struct funeth_txq * xdp_q) fun_handle_cqe_pkt() argument
479 fun_process_cqes(struct funeth_rxq * q,int budget) fun_process_cqes() argument
513 struct funeth_rxq *q = irq->rxq; fun_rxq_napi_poll() local
536 fun_rxq_free_bufs(struct funeth_rxq * q) fun_rxq_free_bufs() argument
549 fun_rxq_alloc_bufs(struct funeth_rxq * q,int node) fun_rxq_alloc_bufs() argument
575 fun_rxq_free_cache(struct funeth_rxq * q) fun_rxq_free_cache() argument
587 fun_rxq_set_bpf(struct funeth_rxq * q,struct bpf_prog * prog) fun_rxq_set_bpf() argument
620 struct funeth_rxq *q; fun_rxq_create_sw() local
677 fun_rxq_free_sw(struct funeth_rxq * q) fun_rxq_free_sw() argument
697 fun_rxq_create_dev(struct funeth_rxq * q,struct fun_irq * irq) fun_rxq_create_dev() argument
765 fun_rxq_free_dev(struct funeth_rxq * q) fun_rxq_free_dev() argument
792 struct funeth_rxq *q = *qp; funeth_rxq_create() local
817 funeth_rxq_free(struct funeth_rxq * q,int state) funeth_rxq_free() argument
[all...]
/linux/sound/core/seq/oss/
seq_oss_readq.c
35 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
62 snd_seq_oss_readq_delete(struct seq_oss_readq * q) snd_seq_oss_readq_delete() argument
74 snd_seq_oss_readq_clear(struct seq_oss_readq * q) snd_seq_oss_readq_clear() argument
89 snd_seq_oss_readq_puts(struct seq_oss_readq * q,int dev,unsigned char * data,int len) snd_seq_oss_readq_puts() argument
123 snd_seq_oss_readq_sysex(struct seq_oss_readq * q,int dev,struct snd_seq_event * ev) snd_seq_oss_readq_sysex() argument
141 snd_seq_oss_readq_put_event(struct seq_oss_readq * q,union evrec * ev) snd_seq_oss_readq_put_event() argument
169 snd_seq_oss_readq_pick(struct seq_oss_readq * q,union evrec * rec) snd_seq_oss_readq_pick() argument
181 snd_seq_oss_readq_wait(struct seq_oss_readq * q) snd_seq_oss_readq_wait() argument
193 snd_seq_oss_readq_free(struct seq_oss_readq * q) snd_seq_oss_readq_free() argument
206 snd_seq_oss_readq_poll(struct seq_oss_readq * q,struct file * file,poll_table * wait) snd_seq_oss_readq_poll() argument
216 snd_seq_oss_readq_put_timestamp(struct seq_oss_readq * q,unsigned long curt,int seq_mode) snd_seq_oss_readq_put_timestamp() argument
244 snd_seq_oss_readq_info_read(struct seq_oss_readq * q,struct snd_info_buffer * buf) snd_seq_oss_readq_info_read() argument
[all...]
seq_oss_writeq.c
27 struct seq_oss_writeq *q; in snd_seq_oss_writeq_new() local
54 snd_seq_oss_writeq_delete(struct seq_oss_writeq * q) snd_seq_oss_writeq_delete() argument
67 snd_seq_oss_writeq_clear(struct seq_oss_writeq * q) snd_seq_oss_writeq_clear() argument
83 snd_seq_oss_writeq_sync(struct seq_oss_writeq * q) snd_seq_oss_writeq_sync() argument
123 snd_seq_oss_writeq_wakeup(struct seq_oss_writeq * q,abstime_t time) snd_seq_oss_writeq_wakeup() argument
139 snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq * q) snd_seq_oss_writeq_get_free_size() argument
152 snd_seq_oss_writeq_set_output(struct seq_oss_writeq * q,int val) snd_seq_oss_writeq_set_output() argument
[all...]
/linux/kernel/sched/
swait.c
7 void __init_swait_queue_head(struct swait_queue_head *q, const char *name, in __init_swait_queue_head() argument
22 void swake_up_locked(struct swait_queue_head *q, int wake_flags) in swake_up_locked() argument
42 void swake_up_all_locked(struct swait_queue_head *q) in swake_up_all_locked() argument
48 swake_up_one(struct swait_queue_head * q) swake_up_one() argument
62 swake_up_all(struct swait_queue_head * q) swake_up_all() argument
85 __prepare_to_swait(struct swait_queue_head * q,struct swait_queue * wait) __prepare_to_swait() argument
92 prepare_to_swait_exclusive(struct swait_queue_head * q,struct swait_queue * wait,int state) prepare_to_swait_exclusive() argument
103 prepare_to_swait_event(struct swait_queue_head * q,struct swait_queue * wait,int state) prepare_to_swait_event() argument
126 __finish_swait(struct swait_queue_head * q,struct swait_queue * wait) __finish_swait() argument
133 finish_swait(struct swait_queue_head * q,struct swait_queue * wait) finish_swait() argument
[all...]
/linux/block/
elevator.c
75 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge() local
136 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
164 static void elevator_exit(struct request_queue *q) in elevator_exit() argument
183 elv_rqhash_del(struct request_queue * q,struct request * rq) elv_rqhash_del() argument
190 elv_rqhash_add(struct request_queue * q,struct request * rq) elv_rqhash_add() argument
200 elv_rqhash_reposition(struct request_queue * q,struct request * rq) elv_rqhash_reposition() argument
206 elv_rqhash_find(struct request_queue * q,sector_t offset) elv_rqhash_find() argument
280 elv_merge(struct request_queue * q,struct request ** req,struct bio * bio) elv_merge() argument
336 elv_attempt_insert_merge(struct request_queue * q,struct request * rq,struct list_head * free) elv_attempt_insert_merge() argument
374 elv_merged_request(struct request_queue * q,struct request * rq,enum elv_merge type) elv_merged_request() argument
388 elv_merge_requests(struct request_queue * q,struct request * rq,struct request * next) elv_merge_requests() argument
400 elv_latter_request(struct request_queue * q,struct request * rq) elv_latter_request() argument
410 elv_former_request(struct request_queue * q,struct request * rq) elv_former_request() argument
469 elv_register_queue(struct request_queue * q,struct elevator_queue * e,bool uevent) elv_register_queue() argument
498 elv_unregister_queue(struct request_queue * q,struct elevator_queue * e) elv_unregister_queue() argument
574 elevator_switch(struct request_queue * q,struct elv_change_ctx * ctx) elevator_switch() argument
620 elv_exit_and_release(struct request_queue * q) elv_exit_and_release() argument
637 elevator_change_done(struct request_queue * q,struct elv_change_ctx * ctx) elevator_change_done() argument
663 elevator_change(struct request_queue * q,struct elv_change_ctx * ctx) elevator_change() argument
708 elv_update_nr_hw_queues(struct request_queue * q,struct elevator_type * e,struct elevator_tags * t) elv_update_nr_hw_queues() argument
740 elevator_set_default(struct request_queue * q) elevator_set_default() argument
774 elevator_set_none(struct request_queue * q) elevator_set_none() argument
804 struct request_queue *q = disk->queue; elv_iosched_store() local
835 struct request_queue *q = disk->queue; elv_iosched_show() local
862 elv_rb_former_request(struct request_queue * q,struct request * rq) elv_rb_former_request() argument
874 elv_rb_latter_request(struct request_queue * q,struct request * rq) elv_rb_latter_request() argument
[all...]
blk-core.c
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_set() argument
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_clear() argument
220 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
231 blk_set_pm_only(struct request_queue * q) blk_set_pm_only() argument
237 blk_clear_pm_only(struct request_queue * q) blk_clear_pm_only() argument
250 struct request_queue *q = container_of(rcu_head, blk_free_queue_rcu() local
257 blk_free_queue(struct request_queue * q) blk_free_queue() argument
276 blk_put_queue(struct request_queue * q) blk_put_queue() argument
283 blk_queue_start_drain(struct request_queue * q) blk_queue_start_drain() argument
304 blk_queue_enter(struct request_queue * q,blk_mq_req_flags_t flags) blk_queue_enter() argument
333 __bio_queue_enter(struct request_queue * q,struct bio * bio) __bio_queue_enter() argument
369 blk_queue_exit(struct request_queue * q) blk_queue_exit() argument
376 struct request_queue *q = blk_queue_usage_counter_release() local
384 struct request_queue *q = timer_container_of(q, t, timeout); blk_rq_timed_out_timer() local
395 struct request_queue *q; blk_alloc_queue() local
486 blk_get_queue(struct request_queue * q) blk_get_queue() argument
594 blk_check_zone_append(struct request_queue * q,struct bio * bio) blk_check_zone_append() argument
681 struct request_queue *q = bdev_get_queue(bio->bi_bdev); __submit_bio_noacct() local
756 blk_validate_atomic_write_op_size(struct request_queue * q,struct bio * bio) blk_validate_atomic_write_op_size() argument
780 struct request_queue *q = bdev_get_queue(bdev); submit_bio_noacct() local
936 struct request_queue *q; bio_poll() local
1097 blk_lld_busy(struct request_queue * q) blk_lld_busy() argument
[all...]
blk-mq.c
115 static bool blk_freeze_set_owner(struct request_queue *q, in blk_freeze_set_owner() argument
137 blk_unfreeze_check_owner(struct request_queue * q) blk_unfreeze_check_owner() argument
150 blk_freeze_set_owner(struct request_queue * q,struct task_struct * owner) blk_freeze_set_owner() argument
156 blk_unfreeze_check_owner(struct request_queue * q) blk_unfreeze_check_owner() argument
162 __blk_freeze_queue_start(struct request_queue * q,struct task_struct * owner) __blk_freeze_queue_start() argument
181 blk_freeze_queue_start(struct request_queue * q) blk_freeze_queue_start() argument
188 blk_mq_freeze_queue_wait(struct request_queue * q) blk_mq_freeze_queue_wait() argument
194 blk_mq_freeze_queue_wait_timeout(struct request_queue * q,unsigned long timeout) blk_mq_freeze_queue_wait_timeout() argument
203 blk_mq_freeze_queue_nomemsave(struct request_queue * q) blk_mq_freeze_queue_nomemsave() argument
210 __blk_mq_unfreeze_queue(struct request_queue * q,bool force_atomic) __blk_mq_unfreeze_queue() argument
229 blk_mq_unfreeze_queue_nomemrestore(struct request_queue * q) blk_mq_unfreeze_queue_nomemrestore() argument
243 blk_freeze_queue_start_non_owner(struct request_queue * q) blk_freeze_queue_start_non_owner() argument
250 blk_mq_unfreeze_queue_non_owner(struct request_queue * q) blk_mq_unfreeze_queue_non_owner() argument
260 blk_mq_quiesce_queue_nowait(struct request_queue * q) blk_mq_quiesce_queue_nowait() argument
298 blk_mq_quiesce_queue(struct request_queue * q) blk_mq_quiesce_queue() argument
314 blk_mq_unquiesce_queue(struct request_queue * q) blk_mq_unquiesce_queue() argument
336 struct request_queue *q; blk_mq_quiesce_tagset() local
351 struct request_queue *q; blk_mq_unquiesce_tagset() local
362 blk_mq_wake_waiters(struct request_queue * q) blk_mq_wake_waiters() argument
372 blk_rq_init(struct request_queue * q,struct request * rq) blk_rq_init() argument
404 struct request_queue *q = data->q; blk_mq_rq_ctx_init() local
487 struct request_queue *q = data->q; __blk_mq_alloc_requests() local
569 blk_mq_rq_cache_fill(struct request_queue * q,struct blk_plug * plug,blk_opf_t opf,blk_mq_req_flags_t flags) blk_mq_rq_cache_fill() argument
598 blk_mq_alloc_cached_request(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags) blk_mq_alloc_cached_request() argument
633 blk_mq_alloc_request(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags) blk_mq_alloc_request() argument
671 blk_mq_alloc_request_hctx(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags,unsigned int hctx_idx) blk_mq_alloc_request_hctx() argument
754 struct request_queue *q = rq->q; blk_mq_finish_request() local
771 struct request_queue *q = rq->q; __blk_mq_free_request() local
792 struct request_queue *q = rq->q; blk_mq_free_request() local
1162 struct request_queue *q = hctx->queue; blk_mq_flush_tag_batch() local
1343 struct request_queue *q = rq->q; blk_mq_start_request() local
1508 struct request_queue *q = rq->q; __blk_mq_requeue_request() local
1523 struct request_queue *q = rq->q; blk_mq_requeue_request() local
1542 struct request_queue *q = blk_mq_requeue_work() local
1577 blk_mq_kick_requeue_list(struct request_queue * q) blk_mq_kick_requeue_list() argument
1583 blk_mq_delay_kick_requeue_list(struct request_queue * q,unsigned long msecs) blk_mq_delay_kick_requeue_list() argument
1620 blk_mq_queue_inflight(struct request_queue * q) blk_mq_queue_inflight() argument
1709 struct request_queue *q = blk_mq_timeout_work() local
2055 blk_mq_release_budgets(struct request_queue * q,struct list_head * list) blk_mq_release_budgets() argument
2092 struct request_queue *q = hctx->queue; blk_mq_dispatch_rq_list() local
2367 blk_mq_get_sq_hctx(struct request_queue * q) blk_mq_get_sq_hctx() argument
2389 blk_mq_run_hw_queues(struct request_queue * q,bool async) blk_mq_run_hw_queues() argument
2417 blk_mq_delay_run_hw_queues(struct request_queue * q,unsigned long msecs) blk_mq_delay_run_hw_queues() argument
2474 blk_mq_stop_hw_queues(struct request_queue * q) blk_mq_stop_hw_queues() argument
2492 blk_mq_start_hw_queues(struct request_queue * q) blk_mq_start_hw_queues() argument
2518 blk_mq_start_stopped_hw_queues(struct request_queue * q,bool async) blk_mq_start_stopped_hw_queues() argument
2597 struct request_queue *q = rq->q; blk_mq_insert_request() local
2683 struct request_queue *q = rq->q; __blk_mq_issue_directly() local
2824 __blk_mq_flush_list(struct request_queue * q,struct rq_list * rqs) __blk_mq_flush_list() argument
2862 struct request_queue *q = rq_list_peek(rqs)->q; blk_mq_dispatch_queue_requests() local
3004 blk_mq_attempt_bio_merge(struct request_queue * q,struct bio * bio,unsigned int nr_segs) blk_mq_attempt_bio_merge() argument
3016 blk_mq_get_new_requests(struct request_queue * q,struct blk_plug * plug,struct bio * bio) blk_mq_get_new_requests() argument
3051 blk_mq_peek_cached_request(struct blk_plug * plug,struct request_queue * q,blk_opf_t opf) blk_mq_peek_cached_request() argument
3087 bio_unaligned(const struct bio * bio,struct request_queue * q) bio_unaligned() argument
3113 struct request_queue *q = bdev_get_queue(bio->bi_bdev); blk_mq_submit_bio() local
3242 struct request_queue *q = rq->q; blk_insert_cloned_request() local
3861 blk_mq_remove_hw_queues_cpuhp(struct request_queue * q) blk_mq_remove_hw_queues_cpuhp() argument
3883 blk_mq_add_hw_queues_cpuhp(struct request_queue * q) blk_mq_add_hw_queues_cpuhp() argument
3924 blk_mq_exit_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned int hctx_idx) blk_mq_exit_hctx() argument
3949 blk_mq_exit_hw_queues(struct request_queue * q,struct blk_mq_tag_set * set,int nr_queue) blk_mq_exit_hw_queues() argument
3963 blk_mq_init_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned hctx_idx) blk_mq_init_hctx() argument
3995 blk_mq_alloc_hctx(struct request_queue * q,struct blk_mq_tag_set * set,int node) blk_mq_alloc_hctx() argument
4061 blk_mq_init_cpu_queues(struct request_queue * q,unsigned int nr_hw_queues) blk_mq_init_cpu_queues() argument
4145 blk_mq_map_swqueue(struct request_queue * q) blk_mq_map_swqueue() argument
4263 queue_set_hctx_shared(struct request_queue * q,bool shared) queue_set_hctx_shared() argument
4281 struct request_queue *q; blk_mq_update_tag_set_shared() local
4293 blk_mq_del_queue_tag_set(struct request_queue * q) blk_mq_del_queue_tag_set() argument
4310 blk_mq_add_queue_tag_set(struct blk_mq_tag_set * set,struct request_queue * q) blk_mq_add_queue_tag_set() argument
4331 blk_mq_alloc_ctxs(struct request_queue * q) blk_mq_alloc_ctxs() argument
4364 blk_mq_release(struct request_queue * q) blk_mq_release() argument
4391 struct request_queue *q; blk_mq_alloc_queue() local
4423 blk_mq_destroy_queue(struct request_queue * q) blk_mq_destroy_queue() argument
4444 struct request_queue *q; __blk_mq_alloc_disk() local
4462 blk_mq_alloc_disk_for_queue(struct request_queue * q,struct lock_class_key * lkclass) blk_mq_alloc_disk_for_queue() argument
4486 blk_mq_alloc_and_init_hctx(struct blk_mq_tag_set * set,struct request_queue * q,int hctx_idx,int node) blk_mq_alloc_and_init_hctx() argument
4520 __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set * set,struct request_queue * q) __blk_mq_realloc_hw_ctxs() argument
4560 blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set * set,struct request_queue * q) blk_mq_realloc_hw_ctxs() argument
4572 blk_mq_init_allocated_queue(struct blk_mq_tag_set * set,struct request_queue * q) blk_mq_init_allocated_queue() argument
4624 blk_mq_exit_queue(struct request_queue * q) blk_mq_exit_queue() argument
4920 blk_mq_update_nr_requests(struct request_queue * q,unsigned int nr) blk_mq_update_nr_requests() argument
4976 blk_mq_elv_switch_back(struct request_queue * q,struct xarray * elv_tbl,struct xarray * et_tbl) blk_mq_elv_switch_back() argument
4994 blk_mq_elv_switch_none(struct request_queue * q,struct xarray * elv_tbl) blk_mq_elv_switch_none() argument
5031 struct request_queue *q; __blk_mq_update_nr_hw_queues() local
5125 blk_hctx_poll(struct request_queue * q,struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob,unsigned int flags) blk_hctx_poll() argument
5152 blk_mq_poll(struct request_queue * q,blk_qc_t cookie,struct io_comp_batch * iob,unsigned int flags) blk_mq_poll() argument
5163 struct request_queue *q = rq->q; blk_rq_poll() local
5184 blk_mq_cancel_work_sync(struct request_queue * q) blk_mq_cancel_work_sync() argument
[all...]
blk-rq-qos.h
62 static inline struct rq_qos *rq_qos_id(struct request_queue *q, in rq_qos_id() argument
73 static inline struct rq_qos *wbt_rq_qos(struct request_queue *q) in wbt_rq_qos() argument
78 static inline struct rq_qos *iolat_rq_qos(struct request_queue *q) in iolat_rq_qos() argument
114 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio) in rq_qos_cleanup() argument
120 static inline void rq_qos_done(struct request_queue *q, struc argument
127 rq_qos_issue(struct request_queue * q,struct request * rq) rq_qos_issue() argument
133 rq_qos_requeue(struct request_queue * q,struct request * rq) rq_qos_requeue() argument
144 struct request_queue *q = bdev_get_queue(bio->bi_bdev); rq_qos_done_bio() local
150 rq_qos_throttle(struct request_queue * q,struct bio * bio) rq_qos_throttle() argument
158 rq_qos_track(struct request_queue * q,struct request * rq,struct bio * bio) rq_qos_track() argument
165 rq_qos_merge(struct request_queue * q,struct request * rq,struct bio * bio) rq_qos_merge() argument
174 rq_qos_queue_depth_changed(struct request_queue * q) rq_qos_queue_depth_changed() argument
[all...]
/linux/drivers/infiniband/sw/rxe/
rxe_queue.h
95 static inline u32 queue_next_index(struct rxe_queue *q, int index) in queue_next_index() argument
100 static inline u32 queue_get_producer(const struct rxe_queue *q, in queue_get_producer() argument
127 queue_get_consumer(const struct rxe_queue * q,enum queue_type type) queue_get_consumer() argument
154 queue_empty(struct rxe_queue * q,enum queue_type type) queue_empty() argument
162 queue_full(struct rxe_queue * q,enum queue_type type) queue_full() argument
170 queue_count(const struct rxe_queue * q,enum queue_type type) queue_count() argument
179 queue_advance_producer(struct rxe_queue * q,enum queue_type type) queue_advance_producer() argument
215 queue_advance_consumer(struct rxe_queue * q,enum queue_type type) queue_advance_consumer() argument
250 queue_producer_addr(struct rxe_queue * q,enum queue_type type) queue_producer_addr() argument
258 queue_consumer_addr(struct rxe_queue * q,enum queue_type type) queue_consumer_addr() argument
266 queue_addr_from_index(struct rxe_queue * q,u32 index) queue_addr_from_index() argument
272 queue_index_from_addr(const struct rxe_queue * q,const void * addr) queue_index_from_addr() argument
279 queue_head(struct rxe_queue * q,enum queue_type type) queue_head() argument
[all...]
/linux/drivers/net/wireless/broadcom/b43/
pio.c
24 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
49 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
126 struct b43_pio_txqueue *q; b43_setup_pioqueue_tx() local
162 struct b43_pio_rxqueue *q; b43_setup_pioqueue_rx() local
178 b43_pio_cancel_tx_packets(struct b43_pio_txqueue * q) b43_pio_cancel_tx_packets() argument
192 b43_destroy_pioqueue_tx(struct b43_pio_txqueue * q,const char * name) b43_destroy_pioqueue_tx() argument
201 b43_destroy_pioqueue_rx(struct b43_pio_rxqueue * q,const char * name) b43_destroy_pioqueue_rx() argument
290 struct b43_pio_txqueue *q; select_queue_by_priority() local
317 tx_write_2byte_queue(struct b43_pio_txqueue * q,u16 ctl,const void * _data,unsigned int data_len) tx_write_2byte_queue() argument
352 struct b43_pio_txqueue *q = pack->queue; pio_tx_frame_2byte_queue() local
370 tx_write_4byte_queue(struct b43_pio_txqueue * q,u32 ctl,const void * _data,unsigned int data_len) tx_write_4byte_queue() argument
422 struct b43_pio_txqueue *q = pack->queue; pio_tx_frame_4byte_queue() local
440 pio_tx_frame(struct b43_pio_txqueue * q,struct sk_buff * skb) pio_tx_frame() argument
491 struct b43_pio_txqueue *q; b43_pio_tx() local
566 struct b43_pio_txqueue *q; b43_pio_handle_txstatus() local
596 pio_rx_frame(struct b43_pio_rxqueue * q) pio_rx_frame() argument
762 b43_pio_rx(struct b43_pio_rxqueue * q) b43_pio_rx() argument
777 b43_pio_tx_suspend_queue(struct b43_pio_txqueue * q) b43_pio_tx_suspend_queue() argument
790 b43_pio_tx_resume_queue(struct b43_pio_txqueue * q) b43_pio_tx_resume_queue() argument
[all...]
/linux/drivers/spi/
spi-fsl-qspi.c
277 static inline int needs_swap_endian(struct fsl_qspi *q) in needs_swap_endian() argument
282 static inline int needs_4x_clock(struct fsl_qspi *q) in needs_4x_clock() argument
287 static inline int needs_fill_txfifo(struct fsl_qspi *q) in needs_fill_txfifo() argument
292 static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) in needs_wakeup_wait_mode() argument
297 static inline int needs_amba_base_offset(struct fsl_qspi *q) in needs_amba_base_offset() argument
302 needs_tdh_setting(struct fsl_qspi * q) needs_tdh_setting() argument
311 fsl_qspi_endian_xchg(struct fsl_qspi * q,u32 a) fsl_qspi_endian_xchg() argument
323 qspi_writel(struct fsl_qspi * q,u32 val,void __iomem * addr) qspi_writel() argument
331 qspi_readl(struct fsl_qspi * q,void __iomem * addr) qspi_readl() argument
341 struct fsl_qspi *q = dev_id; fsl_qspi_irq_handler() local
355 fsl_qspi_check_buswidth(struct fsl_qspi * q,u8 width) fsl_qspi_check_buswidth() argument
370 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_supports_op() local
415 fsl_qspi_prepare_lut(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_prepare_lut() argument
471 fsl_qspi_clk_prep_enable(struct fsl_qspi * q) fsl_qspi_clk_prep_enable() argument
491 fsl_qspi_clk_disable_unprep(struct fsl_qspi * q) fsl_qspi_clk_disable_unprep() argument
507 fsl_qspi_invalidate(struct fsl_qspi * q) fsl_qspi_invalidate() argument
525 fsl_qspi_select_mem(struct fsl_qspi * q,struct spi_device * spi,const struct spi_mem_op * op) fsl_qspi_select_mem() argument
552 fsl_qspi_read_ahb(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_ahb() argument
559 fsl_qspi_fill_txfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_fill_txfifo() argument
584 fsl_qspi_read_rxfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_rxfifo() argument
605 fsl_qspi_do_op(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_do_op() argument
630 fsl_qspi_readl_poll_tout(struct fsl_qspi * q,void __iomem * base,u32 mask,u32 delay_us,u32 timeout_us) fsl_qspi_readl_poll_tout() argument
644 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_exec_op() local
706 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_adjust_op_size() local
721 fsl_qspi_default_setup(struct fsl_qspi * q) fsl_qspi_default_setup() argument
812 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_get_name() local
849 struct fsl_qspi *q = data; fsl_qspi_disable() local
858 struct fsl_qspi *q = data; fsl_qspi_cleanup() local
871 struct fsl_qspi *q; fsl_qspi_probe() local
965 struct fsl_qspi *q = dev_get_drvdata(dev); fsl_qspi_resume() local
[all...]
/linux/drivers/accel/habanalabs/common/
hw_queue.c
31 static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) in queue_free_slots() argument
44 struct hl_hw_queue *q; in hl_hw_queue_update_ci() local
83 hl_hw_queue_submit_bd(struct hl_device * hdev,struct hl_hw_queue * q,u32 ctl,u32 len,u64 ptr) hl_hw_queue_submit_bd() argument
128 ext_queue_sanity_checks(struct hl_device * hdev,struct hl_hw_queue * q,int num_of_entries,bool reserve_cq_entry) ext_queue_sanity_checks() argument
177 int_queue_sanity_checks(struct hl_device * hdev,struct hl_hw_queue * q,int num_of_entries) int_queue_sanity_checks() argument
211 hw_queue_sanity_checks(struct hl_device * hdev,struct hl_hw_queue * q,int num_of_entries) hw_queue_sanity_checks() argument
242 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; hl_hw_queue_send_cb_no_cmpl() local
282 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; ext_queue_schedule_job() local
347 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; int_queue_schedule_job() local
383 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; hw_queue_schedule_job() local
643 struct hl_hw_queue *q; hl_hw_queue_schedule_cs() local
819 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; hl_hw_queue_inc_ci_kernel() local
824 ext_and_cpu_queue_init(struct hl_device * hdev,struct hl_hw_queue * q,bool is_cpu_queue) ext_and_cpu_queue_init() argument
865 int_queue_init(struct hl_device * hdev,struct hl_hw_queue * q) int_queue_init() argument
885 cpu_queue_init(struct hl_device * hdev,struct hl_hw_queue * q) cpu_queue_init() argument
890 ext_queue_init(struct hl_device * hdev,struct hl_hw_queue * q) ext_queue_init() argument
895 hw_queue_init(struct hl_device * hdev,struct hl_hw_queue * q) hw_queue_init() argument
996 queue_init(struct hl_device * hdev,struct hl_hw_queue * q,u32 hw_queue_id) queue_init() argument
1044 queue_fini(struct hl_device * hdev,struct hl_hw_queue * q) queue_fini() argument
1082 struct hl_hw_queue *q; hl_hw_queues_create() local
1128 struct hl_hw_queue *q; hl_hw_queues_destroy() local
1140 struct hl_hw_queue *q; hl_hw_queue_reset() local
[all...]
/linux/net/sched/
sch_multiq.c
32 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_classify() local
89 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_dequeue() local
120 struct multiq_sched_data *q = qdisc_priv(sch); multiq_peek() local
151 struct multiq_sched_data *q = qdisc_priv(sch); multiq_reset() local
162 struct multiq_sched_data *q = qdisc_priv(sch); multiq_destroy() local
174 struct multiq_sched_data *q = qdisc_priv(sch); multiq_tune() local
238 struct multiq_sched_data *q = qdisc_priv(sch); multiq_init() local
263 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump() local
283 struct multiq_sched_data *q = qdisc_priv(sch); multiq_graft() local
296 struct multiq_sched_data *q = qdisc_priv(sch); multiq_leaf() local
304 struct multiq_sched_data *q = qdisc_priv(sch); multiq_find() local
319 multiq_unbind(struct Qdisc * q,unsigned long cl) multiq_unbind() argument
326 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class() local
336 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class_stats() local
349 struct multiq_sched_data *q = qdisc_priv(sch); multiq_walk() local
364 struct multiq_sched_data *q = qdisc_priv(sch); multiq_tcf_block() local
[all...]
sch_red.c
55 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
60 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
65 static int red_use_nodrop(struct red_sched_data *q) in red_use_nodrop() argument
74 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
155 struct red_sched_data *q = qdisc_priv(sch); red_dequeue() local
172 struct red_sched_data *q = qdisc_priv(sch); red_peek() local
180 struct red_sched_data *q = qdisc_priv(sch); red_reset() local
188 struct red_sched_data *q = qdisc_priv(sch); red_offload() local
217 struct red_sched_data *q = qdisc_priv(sch); red_destroy() local
240 struct red_sched_data *q = qdisc_priv(sch); __red_change() local
324 struct red_sched_data *q = timer_container_of(q, t, adapt_timer); red_adaptative_timer() local
340 struct red_sched_data *q = qdisc_priv(sch); red_init() local
374 struct red_sched_data *q = qdisc_priv(sch); red_change() local
413 struct red_sched_data *q = qdisc_priv(sch); red_dump() local
450 struct red_sched_data *q = qdisc_priv(sch); red_dump_stats() local
476 struct red_sched_data *q = qdisc_priv(sch); red_dump_class() local
501 struct red_sched_data *q = qdisc_priv(sch); red_graft() local
514 struct red_sched_data *q = qdisc_priv(sch); red_leaf() local
[all...]
sch_choke.c
75 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
81 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
87 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
93 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
103 choke_zap_tail_holes(struct choke_sched_data * q) choke_zap_tail_holes() argument
116 struct choke_sched_data *q = qdisc_priv(sch); choke_drop_by_idx() local
179 choke_peek_random(const struct choke_sched_data * q,unsigned int * pidx) choke_peek_random() argument
199 choke_match_random(const struct choke_sched_data * q,struct sk_buff * nskb,unsigned int * pidx) choke_match_random() argument
215 struct choke_sched_data *q = qdisc_priv(sch); choke_enqueue() local
285 struct choke_sched_data *q = qdisc_priv(sch); choke_dequeue() local
306 struct choke_sched_data *q = qdisc_priv(sch); choke_reset() local
338 struct choke_sched_data *q = qdisc_priv(sch); choke_change() local
433 struct choke_sched_data *q = qdisc_priv(sch); choke_dump() local
462 struct choke_sched_data *q = qdisc_priv(sch); choke_dump_stats() local
475 struct choke_sched_data *q = qdisc_priv(sch); choke_destroy() local
482 struct choke_sched_data *q = qdisc_priv(sch); choke_peek_head() local
[all...]
sch_prio.c
33 struct prio_sched_data *q = qdisc_priv(sch); in prio_classify() local
99 struct prio_sched_data *q in prio_peek() local
113 struct prio_sched_data *q = qdisc_priv(sch); prio_dequeue() local
134 struct prio_sched_data *q = qdisc_priv(sch); prio_reset() local
168 struct prio_sched_data *q = qdisc_priv(sch); prio_destroy() local
179 struct prio_sched_data *q = qdisc_priv(sch); prio_tune() local
232 struct prio_sched_data *q = qdisc_priv(sch); prio_init() local
264 struct prio_sched_data *q = qdisc_priv(sch); prio_dump() local
289 struct prio_sched_data *q = qdisc_priv(sch); prio_graft() local
319 struct prio_sched_data *q = qdisc_priv(sch); prio_leaf() local
327 struct prio_sched_data *q = qdisc_priv(sch); prio_find() local
341 prio_unbind(struct Qdisc * q,unsigned long cl) prio_unbind() argument
348 struct prio_sched_data *q = qdisc_priv(sch); prio_dump_class() local
358 struct prio_sched_data *q = qdisc_priv(sch); prio_dump_class_stats() local
372 struct prio_sched_data *q = qdisc_priv(sch); prio_walk() local
387 struct prio_sched_data *q = qdisc_priv(sch); prio_tcf_block() local
[all...]
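sch_prio.c above (like sch_multiq.c earlier in this directory) classifies each packet into one of several bands and dequeues the bands in strict priority order. As a rough, self-contained illustration of that classify/enqueue/dequeue flow, here is a small C sketch; the types and helpers are hypothetical stand-ins, not the kernel Qdisc API, and the real prio_classify() consults a TC filter chain and a priomap rather than the trivial mapping used here.

#include <stdbool.h>
#include <stdio.h>

#define NUM_BANDS 3	/* band 0 is the highest priority */
#define BAND_CAP  8

/* Hypothetical per-band FIFO standing in for the per-band child qdisc. */
struct band {
	int pkts[BAND_CAP];
	int head, tail;
};

struct prio_sched {
	struct band bands[NUM_BANDS];
};

/* Classify: map a "priority" value to a band (a stand-in for the
 * filter/priomap lookup the real classifier performs). */
static int classify(int priority)
{
	if (priority < 0)
		priority = 0;
	return priority < NUM_BANDS ? priority : NUM_BANDS - 1;
}

static bool enqueue(struct prio_sched *q, int pkt, int priority)
{
	struct band *b = &q->bands[classify(priority)];

	if (b->tail - b->head == BAND_CAP)
		return false;			/* band full: drop */
	b->pkts[b->tail++ % BAND_CAP] = pkt;
	return true;
}

/* Strict-priority dequeue: always drain the lowest-numbered non-empty
 * band first. */
static bool dequeue(struct prio_sched *q, int *pkt)
{
	for (int band = 0; band < NUM_BANDS; band++) {
		struct band *b = &q->bands[band];

		if (b->tail != b->head) {
			*pkt = b->pkts[b->head++ % BAND_CAP];
			return true;
		}
	}
	return false;				/* all bands empty */
}

int main(void)
{
	struct prio_sched q = { 0 };
	int pkt;

	enqueue(&q, 100, 2);	/* low priority  */
	enqueue(&q, 200, 0);	/* high priority */
	enqueue(&q, 300, 1);

	while (dequeue(&q, &pkt))
		printf("dequeued %d\n", pkt);	/* 200, 300, 100 */
	return 0;
}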
/linux/drivers/scsi/arm/
queue.c
43 #define SET_MAGIC(q,m) ((q)->magic = (m)) argument
44 #define BAD_MAGIC(q,m) ((q)->magic != (m)) argument
46 #define SET_MAGIC(q,m) do { } while (0) argument
47 #define BAD_MAGIC(q,m) (0) argument
62 QE_t *q; in queue_initialise() local
111 QE_t *q; __queue_add() local
140 QE_t *q; __queue_remove() local
170 QE_t *q = list_entry(l, QE_t, list); queue_remove_exclude() local
219 QE_t *q = list_entry(l, QE_t, list); queue_remove_tgtluntag() local
245 QE_t *q = list_entry(l, QE_t, list); queue_remove_all_target() local
269 QE_t *q = list_entry(l, QE_t, list); queue_probetgtlun() local
295 QE_t *q = list_entry(l, QE_t, list); queue_remove_cmd() local
[all...]
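The arm SCSI queue.c above tags each queue entry with SET_MAGIC()/BAD_MAGIC() so that, when queue debugging is compiled in, stale or corrupted entries can be caught at add/remove time (the non-debug build compiles the macros away, as the second pair of definitions shows). A minimal, hypothetical sketch of that debug-magic pattern, simplified and not the driver's QE_t/list_head implementation:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_DEBUG 1

#define QUEUE_MAGIC_FREE 0xf7e1c9a3u
#define QUEUE_MAGIC_USED 0xf7e1cc33u

#if QUEUE_DEBUG
#define SET_MAGIC(q, m)	((q)->magic = (m))
#define BAD_MAGIC(q, m)	((q)->magic != (m))
#else
#define SET_MAGIC(q, m)	do { } while (0)
#define BAD_MAGIC(q, m)	(0)
#endif

struct entry {
	unsigned int magic;	/* debug tag: FREE or USED */
	struct entry *next;
	int payload;
};

static struct entry *entry_alloc(int payload)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	SET_MAGIC(e, QUEUE_MAGIC_FREE);
	e->payload = payload;
	return e;
}

static void queue_add(struct entry **head, struct entry *e)
{
	struct entry **p = head;

	if (!e)
		return;
	assert(!BAD_MAGIC(e, QUEUE_MAGIC_FREE));	/* catch double-add */
	SET_MAGIC(e, QUEUE_MAGIC_USED);
	e->next = NULL;
	while (*p)
		p = &(*p)->next;
	*p = e;						/* append at tail: FIFO */
}

static struct entry *queue_remove(struct entry **head)
{
	struct entry *e = *head;

	if (!e)
		return NULL;
	assert(!BAD_MAGIC(e, QUEUE_MAGIC_USED));	/* catch corruption */
	*head = e->next;
	SET_MAGIC(e, QUEUE_MAGIC_FREE);
	return e;
}

int main(void)
{
	struct entry *head = NULL;
	struct entry *e;

	queue_add(&head, entry_alloc(1));
	queue_add(&head, entry_alloc(2));

	while ((e = queue_remove(&head))) {
		printf("removed %d\n", e->payload);
		free(e);
	}
	return 0;
}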
/linux/drivers/net/
tap.c
82 struct tap_queue *q) in tap_enable_queue() argument
103 struct tap_queue *q) in tap_set_queue() argument
124 tap_disable_queue(struct tap_queue * q) tap_disable_queue() argument
159 tap_put_queue(struct tap_queue * q) tap_put_queue() argument
237 struct tap_queue *q, *tmp; tap_del_queues() local
260 struct tap_queue *q; tap_handle_frame() local
441 struct tap_queue *q = container_of(sk, struct tap_queue, sk); tap_sock_destruct() local
450 struct tap_queue *q; tap_open() local
515 struct tap_queue *q = file->private_data; tap_release() local
522 struct tap_queue *q = file->private_data; tap_poll() local
572 tap_get_user(struct tap_queue * q,void * msg_control,struct iov_iter * from,int noblock) tap_get_user() argument
694 struct tap_queue *q = file->private_data; tap_write_iter() local
704 tap_put_user(struct tap_queue * q,const struct sk_buff * skb,struct iov_iter * iter) tap_put_user() argument
756 tap_do_read(struct tap_queue * q,struct iov_iter * to,int noblock,struct sk_buff * skb) tap_do_read() argument
808 struct tap_queue *q = file->private_data; tap_read_iter() local
822 tap_get_tap_dev(struct tap_queue * q) tap_get_tap_dev() argument
841 struct tap_queue *q = file->private_data; tap_ioctl_set_queue() local
860 set_offload(struct tap_queue * q,unsigned long arg) set_offload() argument
919 struct tap_queue *q = file->private_data; tap_ioctl() local
1045 tap_get_user_xdp(struct tap_queue * q,struct xdp_buff * xdp) tap_get_user_xdp() argument
1113 struct tap_queue *q = container_of(sock, struct tap_queue, sock); tap_sendmsg() local
1134 struct tap_queue *q = container_of(sock, struct tap_queue, sock); tap_recvmsg() local
1151 struct tap_queue *q = container_of(sock, struct tap_queue, tap_peek_len() local
1169 struct tap_queue *q; tap_get_socket() local
1181 struct tap_queue *q; tap_get_ptr_ring() local
1195 struct tap_queue *q; tap_queue_resize() local
[all...]
/linux/drivers/s390/cio/
qdio_main.c
116 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
166 qdio_do_sqbs(struct qdio_q * q,unsigned char state,int start,int count) qdio_do_sqbs() argument
205 get_buf_states(struct qdio_q * q,unsigned int bufnr,unsigned char * state,unsigned int count,int auto_ack) get_buf_states() argument
235 get_buf_state(struct qdio_q * q,unsigned int bufnr,unsigned char * state,int auto_ack) get_buf_state() argument
242 set_buf_states(struct qdio_q * q,int bufnr,unsigned char state,int count) set_buf_states() argument
264 set_buf_state(struct qdio_q * q,int bufnr,unsigned char state) set_buf_state() argument
273 struct qdio_q *q; qdio_init_buf_states() local
284 qdio_siga_sync(struct qdio_q * q,unsigned int output,unsigned int input) qdio_siga_sync() argument
305 qdio_sync_input_queue(struct qdio_q * q) qdio_sync_input_queue() argument
310 qdio_sync_output_queue(struct qdio_q * q) qdio_sync_output_queue() argument
315 qdio_siga_sync_q(struct qdio_q * q) qdio_siga_sync_q() argument
323 qdio_siga_output(struct qdio_q * q,unsigned int count,unsigned int * busy_bit,dma64_t aob) qdio_siga_output() argument
364 qdio_siga_input(struct qdio_q * q) qdio_siga_input() argument
384 debug_get_buf_state(struct qdio_q * q,unsigned int bufnr,unsigned char * state) debug_get_buf_state() argument
392 qdio_stop_polling(struct qdio_q * q) qdio_stop_polling() argument
405 account_sbals(struct qdio_q * q,unsigned int count) account_sbals() argument
411 process_buffer_error(struct qdio_q * q,unsigned int start,int count) process_buffer_error() argument
430 inbound_handle_work(struct qdio_q * q,unsigned int start,int count,bool auto_ack) inbound_handle_work() argument
442 get_inbound_buffer_frontier(struct qdio_q * q,unsigned int start,unsigned int * error) get_inbound_buffer_frontier() argument
506 struct qdio_q *q; qdio_inspect_input_queue() local
526 qdio_inbound_q_done(struct qdio_q * q,unsigned int start) qdio_inbound_q_done() argument
544 get_outbound_buffer_frontier(struct qdio_q * q,unsigned int start,unsigned int * error) get_outbound_buffer_frontier() argument
610 struct qdio_q *q; qdio_inspect_output_queue() local
630 qdio_kick_outbound_q(struct qdio_q * q,unsigned int count,dma64_t aob) qdio_kick_outbound_q() argument
1210 handle_inbound(struct qdio_q * q,int bufnr,int count) handle_inbound() argument
1269 handle_outbound(struct qdio_q * q,unsigned int bufnr,unsigned int count,struct qaob * aob) handle_outbound() argument
1343 struct qdio_q *q; qdio_start_irq() local
[all...]
