Searched refs:rq (Results 1 – 25 of 594) sorted by relevance

/linux/drivers/gpu/drm/i915/
i915_request.c
114 struct i915_request *rq = to_request(fence); in i915_fence_release() local
116 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
117 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
119 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); in i915_fence_release()
120 if (rq->batch_res) { in i915_fence_release()
121 i915_vma_resource_put(rq->batch_res); in i915_fence_release()
122 rq->batch_res = NULL; in i915_fence_release()
132 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
133 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
139 * very careful in what rq in i915_fence_release()
191 __notify_execute_cb(struct i915_request * rq,bool (* fn)(struct irq_work * wrk)) __notify_execute_cb() argument
204 __notify_execute_cb_irq(struct i915_request * rq) __notify_execute_cb_irq() argument
215 i915_request_notify_execute_cb_imm(struct i915_request * rq) i915_request_notify_execute_cb_imm() argument
220 __i915_request_fill(struct i915_request * rq,u8 val) __i915_request_fill() argument
244 i915_request_active_engine(struct i915_request * rq,struct intel_engine_cs ** active) i915_request_active_engine() argument
278 struct i915_request *rq = __rq_watchdog_expired() local
292 __rq_init_watchdog(struct i915_request * rq) __rq_init_watchdog() argument
299 __rq_arm_watchdog(struct i915_request * rq) __rq_arm_watchdog() argument
316 __rq_cancel_watchdog(struct i915_request * rq) __rq_cancel_watchdog() argument
356 i915_request_retire(struct i915_request * rq) i915_request_retire() argument
418 i915_request_retire_upto(struct i915_request * rq) i915_request_retire_upto() argument
440 struct i915_request * const *port, *rq; __request_in_flight() local
501 __await_execution(struct i915_request * rq,struct i915_request * signal,gfp_t gfp) __await_execution() argument
552 __i915_request_skip(struct i915_request * rq) __i915_request_skip() argument
570 i915_request_set_error_once(struct i915_request * rq,int error) i915_request_set_error_once() argument
588 i915_request_mark_eio(struct i915_request * rq) i915_request_mark_eio() argument
761 i915_request_cancel(struct i915_request * rq,int error) i915_request_cancel() argument
810 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); semaphore_notify() local
826 struct i915_request *rq, *rn; retire_requests() local
838 struct i915_request *rq; request_alloc_slow() local
874 struct i915_request *rq = arg; __i915_request_ctor() local
897 struct i915_request *rq; __i915_request_create() local
1030 struct i915_request *rq; i915_request_create() local
1059 i915_request_await_start(struct i915_request * rq,struct i915_request * signal) i915_request_await_start() argument
1125 already_busywaiting(struct i915_request * rq) already_busywaiting() argument
1334 mark_external(struct i915_request * rq) mark_external() argument
1348 __i915_request_await_external(struct i915_request * rq,struct dma_fence * fence) __i915_request_await_external() argument
1358 i915_request_await_external(struct i915_request * rq,struct dma_fence * fence) i915_request_await_external() argument
1383 is_parallel_rq(struct i915_request * rq) is_parallel_rq() argument
1388 request_to_parent(struct i915_request * rq) request_to_parent() argument
1403 i915_request_await_execution(struct i915_request * rq,struct dma_fence * fence) i915_request_await_execution() argument
1500 i915_request_await_dma_fence(struct i915_request * rq,struct dma_fence * fence) i915_request_await_dma_fence() argument
1568 i915_request_await_deps(struct i915_request * rq,const struct i915_deps * deps) i915_request_await_deps() argument
1620 i915_request_await_huc(struct i915_request * rq) i915_request_await_huc() argument
1635 __i915_request_ensure_parallel_ordering(struct i915_request * rq,struct intel_timeline * timeline) __i915_request_ensure_parallel_ordering() argument
1670 __i915_request_ensure_ordering(struct i915_request * rq,struct intel_timeline * timeline) __i915_request_ensure_ordering() argument
1719 __i915_request_add_to_timeline(struct i915_request * rq) __i915_request_add_to_timeline() argument
1786 __i915_request_commit(struct i915_request * rq) __i915_request_commit() argument
1816 __i915_request_queue_bh(struct i915_request * rq) __i915_request_queue_bh() argument
1822 __i915_request_queue(struct i915_request * rq,const struct i915_sched_attr * attr) __i915_request_queue() argument
1844 i915_request_add(struct i915_request * rq) i915_request_add() argument
1901 __i915_spin_request(struct i915_request * const rq,int state) __i915_spin_request() argument
1980 i915_request_wait_timeout(struct i915_request * rq,unsigned int flags,long timeout) i915_request_wait_timeout() argument
2119 i915_request_wait(struct i915_request * rq,unsigned int flags,long timeout) i915_request_wait() argument
2146 queue_status(const struct i915_request * rq) queue_status() argument
2157 run_status(const struct i915_request * rq) run_status() argument
2171 fence_status(const struct i915_request * rq) fence_status() argument
2183 i915_request_show(struct drm_printer * m,const struct i915_request * rq,const char * prefix,int indent) i915_request_show() argument
2237 engine_match_ring(struct intel_engine_cs * engine,struct i915_request * rq) engine_match_ring() argument
2244 match_ring(struct i915_request * rq) match_ring() argument
2264 i915_test_request_state(struct i915_request * rq) i915_test_request_state() argument
[all...]
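
The matches above span the request lifecycle, from __i915_request_create() through __i915_request_add_to_timeline() to i915_request_wait(). As a rough orientation, a driver-internal caller strings the helpers together like this (a minimal sketch; the surrounding function and the pinned intel_context `ce` are hypothetical):

    struct i915_request *rq;
    long ret;

    rq = i915_request_create(ce);          /* new request on ce's timeline */
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    /* ... emit GPU commands into the request's ring here ... */

    i915_request_get(rq);                  /* hold a reference across the wait */
    i915_request_add(rq);                  /* commit and queue for execution */

    ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
    i915_request_put(rq);
    if (ret < 0)
            return ret;                    /* negative errno on failure */
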
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \ argument
65 const struct i915_request *rq__ = (rq); \
276 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
378 void __i915_request_skip(struct i915_request *rq);
379 bool i915_request_set_error_once(struct i915_request *rq, int error);
380 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
383 void __i915_request_queue(struct i915_request *rq,
385 void __i915_request_queue_bh(struct i915_request *rq);
387 bool i915_request_retire(struct i915_request *rq);
388 void i915_request_retire_upto(struct i915_request *rq);
400 i915_request_get(struct i915_request * rq) i915_request_get() argument
406 i915_request_get_rcu(struct i915_request * rq) i915_request_get_rcu() argument
412 i915_request_put(struct i915_request * rq) i915_request_put() argument
454 i915_request_signaled(const struct i915_request * rq) i915_request_signaled() argument
460 i915_request_is_active(const struct i915_request * rq) i915_request_is_active() argument
465 i915_request_in_priority_queue(const struct i915_request * rq) i915_request_in_priority_queue() argument
471 i915_request_has_initial_breadcrumb(const struct i915_request * rq) i915_request_has_initial_breadcrumb() argument
484 __hwsp_seqno(const struct i915_request * rq) __hwsp_seqno() argument
504 hwsp_seqno(const struct i915_request * rq) hwsp_seqno() argument
515 __i915_request_has_started(const struct i915_request * rq) __i915_request_has_started() argument
546 i915_request_started(const struct i915_request * rq) i915_request_started() argument
571 i915_request_is_running(const struct i915_request * rq) i915_request_is_running() argument
601 i915_request_is_ready(const struct i915_request * rq) i915_request_is_ready() argument
606 __i915_request_is_complete(const struct i915_request * rq) __i915_request_is_complete() argument
611 i915_request_completed(const struct i915_request * rq) i915_request_completed() argument
627 i915_request_mark_complete(struct i915_request * rq) i915_request_mark_complete() argument
633 i915_request_has_waitboost(const struct i915_request * rq) i915_request_has_waitboost() argument
638 i915_request_has_nopreempt(const struct i915_request * rq) i915_request_has_nopreempt() argument
644 i915_request_has_sentinel(const struct i915_request * rq) i915_request_has_sentinel() argument
649 i915_request_on_hold(const struct i915_request * rq) i915_request_on_hold() argument
654 i915_request_set_hold(struct i915_request * rq) i915_request_set_hold() argument
659 i915_request_clear_hold(struct i915_request * rq) i915_request_clear_hold() argument
665 i915_request_timeline(const struct i915_request * rq) i915_request_timeline() argument
674 i915_request_gem_context(const struct i915_request * rq) i915_request_gem_context() argument
681 i915_request_active_timeline(const struct i915_request * rq) i915_request_active_timeline() argument
693 i915_request_active_seqno(const struct i915_request * rq) i915_request_active_seqno() argument
[all...]
/linux/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq in vnic_rq_alloc_bufs()
53 vnic_rq_free(struct vnic_rq * rq) vnic_rq_free() argument
70 vnic_rq_alloc(struct vnic_dev * vdev,struct vnic_rq * rq,unsigned int index,unsigned int desc_count,unsigned int desc_size) vnic_rq_alloc() argument
99 vnic_rq_init(struct vnic_rq * rq,unsigned int cq_index,unsigned int error_interrupt_enable,unsigned int error_interrupt_offset) vnic_rq_init() argument
125 vnic_rq_error_status(struct vnic_rq * rq) vnic_rq_error_status() argument
130 vnic_rq_enable(struct vnic_rq * rq) vnic_rq_enable() argument
135 vnic_rq_disable(struct vnic_rq * rq) vnic_rq_disable() argument
153 vnic_rq_clean(struct vnic_rq * rq,void (* buf_clean)(struct vnic_rq * rq,struct vnic_rq_buf * buf)) vnic_rq_clean() argument
154 vnic_rq_clean(struct vnic_rq * rq,void (* buf_clean)(struct vnic_rq * rq,struct vnic_rq_buf * buf)) vnic_rq_clean() argument
[all...]
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
107 return rq->to_use->desc; in vnic_rq_next_desc()
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
112 return rq->to_use->index; in vnic_rq_next_index()
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
120 vnic_rq_post(struct vnic_rq * rq,void * os_buf,unsigned int os_buf_index,dma_addr_t dma_addr,unsigned int len) vnic_rq_post() argument
153 vnic_rq_posting_soon(struct vnic_rq * rq) vnic_rq_posting_soon() argument
158 vnic_rq_return_descs(struct vnic_rq * rq,unsigned int count) vnic_rq_return_descs() argument
168 vnic_rq_service(struct vnic_rq * rq,struct cq_desc * cq_desc,u16 completed_index,int desc_return,void (* buf_service)(struct vnic_rq * rq,struct cq_desc * cq_desc,struct vnic_rq_buf * buf,int skipped,void * opaque),void * opaque) vnic_rq_service() argument
170 vnic_rq_service(struct vnic_rq * rq,struct cq_desc * cq_desc,u16 completed_index,int desc_return,void (* buf_service)(struct vnic_rq * rq,struct cq_desc * cq_desc,struct vnic_rq_buf * buf,int skipped,void * opaque),void * opaque) vnic_rq_service() argument
196 vnic_rq_fill(struct vnic_rq * rq,int (* buf_fill)(struct vnic_rq * rq)) vnic_rq_fill() argument
197 vnic_rq_fill(struct vnic_rq * rq,int (* buf_fill)(struct vnic_rq * rq)) vnic_rq_fill() argument
[all...]
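
vnic_rq_fill() above keeps calling a driver-supplied buf_fill() callback while vnic_rq_desc_avail() is non-zero, and each callback is expected to end in vnic_rq_post(). A minimal sketch of such a callback, with the buffer allocation and DMA mapping elided (example_buf_fill and RX_BUF_LEN are hypothetical names):

    static int example_buf_fill(struct vnic_rq *rq)
    {
            void *desc = vnic_rq_next_desc(rq);   /* slot that rq->to_use points at */
            void *os_buf = NULL;
            dma_addr_t dma_addr = 0;

            /* allocate a receive buffer, DMA-map it, and encode address and
             * length into *desc here (device specific; elided) ... */

            /* publish the descriptor and advance to_use / posted_index */
            vnic_rq_post(rq, os_buf, 0, dma_addr, RX_BUF_LEN);
            return 0;
    }

    /* top the ring up until no descriptors remain available: */
    vnic_rq_fill(rq, example_buf_fill);
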
/linux/kernel/sched/
sched.h
79 struct rq;
104 extern void calc_global_load_tick(struct rq *this_rq);
105 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
107 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
314 /* nests inside the rq lock: */
361 extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
366 * dl_se::rq -- runqueue we belong to.
385 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *r
708 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ global() member
820 struct rq *rq; /* this is always top-level rq, cache? */ global() member
1091 struct rq { global() struct
(matched members of struct rq:)
1114 uclamp
1119 cfs
1120 rt
1121 dl
1123 scx
1129 fair_server
1130 leaf_cfs_rq_list
1131 tmp_alone_branch
1140 nr_uninterruptible
1143 donor
1144 curr
1146 __anon2a885461040a (anonymous member)
1151 dl_server
1152 idle
1153 stop
1154 next_balance
1155 prev_mm
1157 clock_update_flags
1158 clock
1160 ____cacheline_aligned
1161 clock_pelt
1162 lost_idle_time
1163 clock_pelt_idle
1164 clock_idle
1166 clock_pelt_idle_copy
1167 clock_idle_copy
1170 nr_iowait
1172 last_seen_need_resched_ns
1173 ticks_without_resched
1176 membarrier_state
1179 rd
1180 sd
1205 avg_irq
1221 prev_irq_time
1222 psi_irq_time
1228 prev_steal_time_rq
1243 rq_sched_info
1244 rq_cpu_time
1268 push_work
1269 core
1286 core_tree
1298 cfsb_csd_list; rq_of() argument
1311 cpu_of(struct rq * rq) cpu_of() argument
1332 rq_set_donor(struct rq * rq,struct task_struct * t) rq_set_donor() argument
1337 rq_set_donor(struct rq * rq,struct task_struct * t) rq_set_donor() argument
1348 sched_core_enabled(struct rq * rq) sched_core_enabled() argument
1362 rq_lockp(struct rq * rq) rq_lockp() argument
1370 __rq_lockp(struct rq * rq) __rq_lockp() argument
1389 sched_cpu_cookie_match(struct rq * rq,struct task_struct * p) sched_cpu_cookie_match() argument
1398 sched_core_cookie_match(struct rq * rq,struct task_struct * p) sched_core_cookie_match() argument
1421 sched_group_cookie_match(struct rq * rq,struct task_struct * p,struct sched_group * group) sched_group_cookie_match() argument
1451 sched_core_enabled(struct rq * rq) sched_core_enabled() argument
1461 rq_lockp(struct rq * rq) rq_lockp() argument
1466 __rq_lockp(struct rq * rq) __rq_lockp() argument
1471 sched_cpu_cookie_match(struct rq * rq,struct task_struct * p) sched_cpu_cookie_match() argument
1476 sched_core_cookie_match(struct rq * rq,struct task_struct * p) sched_core_cookie_match() argument
1481 sched_group_cookie_match(struct rq * rq,struct task_struct * p,struct sched_group * group) sched_group_cookie_match() argument
1508 lockdep_assert_rq_held(struct rq * rq) lockdep_assert_rq_held() argument
1517 raw_spin_rq_lock(struct rq * rq) raw_spin_rq_lock() argument
1522 raw_spin_rq_lock_irq(struct rq * rq) raw_spin_rq_lock_irq() argument
1528 raw_spin_rq_unlock_irq(struct rq * rq) raw_spin_rq_unlock_irq() argument
1534 _raw_spin_rq_lock_irqsave(struct rq * rq) _raw_spin_rq_lock_irqsave() argument
1544 raw_spin_rq_unlock_irqrestore(struct rq * rq,unsigned long flags) raw_spin_rq_unlock_irqrestore() argument
1550 raw_spin_rq_lock_irqsave(rq,flags) global() argument
1558 update_idle_core(struct rq * rq) update_idle_core() argument
1565 update_idle_core(struct rq * rq) update_idle_core() argument
1605 struct rq *rq = task_rq(p); cfs_rq_of() local
1647 assert_clock_updated(struct rq * rq) assert_clock_updated() argument
1656 rq_clock(struct rq * rq) rq_clock() argument
1664 rq_clock_task(struct rq * rq) rq_clock_task() argument
1672 rq_clock_skip_update(struct rq * rq) rq_clock_skip_update() argument
1682 rq_clock_cancel_skipupdate(struct rq * rq) rq_clock_cancel_skipupdate() argument
1697 rq_clock_start_loop_update(struct rq * rq) rq_clock_start_loop_update() argument
1704 rq_clock_stop_loop_update(struct rq * rq) rq_clock_stop_loop_update() argument
1732 scx_rq_clock_update(struct rq * rq,u64 clock) scx_rq_clock_update() argument
1740 scx_rq_clock_invalidate(struct rq * rq) scx_rq_clock_invalidate() argument
1751 scx_rq_clock_update(struct rq * rq,u64 clock) scx_rq_clock_update() argument
1752 scx_rq_clock_invalidate(struct rq * rq) scx_rq_clock_invalidate() argument
1765 rq_pin_lock(struct rq * rq,struct rq_flags * rf) rq_pin_lock() argument
1774 rq_unpin_lock(struct rq * rq,struct rq_flags * rf) rq_unpin_lock() argument
1783 rq_repin_lock(struct rq * rq,struct rq_flags * rf) rq_repin_lock() argument
1802 __task_rq_unlock(struct rq * rq,struct rq_flags * rf) __task_rq_unlock() argument
1810 task_rq_unlock(struct rq * rq,struct task_struct * p,struct rq_flags * rf) task_rq_unlock() argument
1824 rq_lock_irqsave(struct rq * rq,struct rq_flags * rf) rq_lock_irqsave() argument
1831 rq_lock_irq(struct rq * rq,struct rq_flags * rf) rq_lock_irq() argument
1838 rq_lock(struct rq * rq,struct rq_flags * rf) rq_lock() argument
1845 rq_unlock_irqrestore(struct rq * rq,struct rq_flags * rf) rq_unlock_irqrestore() argument
1852 rq_unlock_irq(struct rq * rq,struct rq_flags * rf) rq_unlock_irq() argument
1859 rq_unlock(struct rq * rq,struct rq_flags * rf) rq_unlock() argument
1884 struct rq *rq; this_rq_lock_irq() local
1950 queue_balance_callback(struct rq * rq,struct balance_callback * head,void (* func)(struct rq * rq)) queue_balance_callback() argument
1952 queue_balance_callback(struct rq * rq,struct balance_callback * head,void (* func)(struct rq * rq)) queue_balance_callback() argument
2246 task_current(struct rq * rq,struct task_struct * p) task_current() argument
2257 task_current_donor(struct rq * rq,struct task_struct * p) task_current_donor() argument
2270 task_on_cpu(struct rq * rq,struct task_struct * p) task_on_cpu() argument
2442 put_prev_task(struct rq * rq,struct task_struct * prev) put_prev_task() argument
2448 set_next_task(struct rq * rq,struct task_struct * next) set_next_task() argument
2454 __put_prev_set_next_dl_server(struct rq * rq,struct task_struct * prev,struct task_struct * next) __put_prev_set_next_dl_server() argument
2463 put_prev_set_next_task(struct rq * rq,struct task_struct * prev,struct task_struct * next) put_prev_set_next_task() argument
2533 sched_stop_runnable(struct rq * rq) sched_stop_runnable() argument
2538 sched_dl_runnable(struct rq * rq) sched_dl_runnable() argument
2543 sched_rt_runnable(struct rq * rq) sched_rt_runnable() argument
2548 sched_fair_runnable(struct rq * rq) sched_fair_runnable() argument
2591 get_push_task(struct rq * rq) get_push_task() argument
2614 idle_set_state(struct rq * rq,struct cpuidle_state * idle_state) idle_set_state() argument
2620 idle_get_state(struct rq * rq) idle_get_state() argument
2629 idle_set_state(struct rq * rq,struct cpuidle_state * idle_state) idle_set_state() argument
2634 idle_get_state(struct rq * rq) idle_get_state() argument
2681 sched_update_tick_dependency(struct rq * rq) sched_update_tick_dependency() argument
2695 sched_update_tick_dependency(struct rq * rq) sched_update_tick_dependency() argument
2698 add_nr_running(struct rq * rq,unsigned count) add_nr_running() argument
2713 sub_nr_running(struct rq * rq,unsigned count) sub_nr_running() argument
2724 __block_task(struct rq * rq,struct task_struct * p) __block_task() argument
2801 hrtick_enabled(struct rq * rq) hrtick_enabled() argument
2808 hrtick_enabled_fair(struct rq * rq) hrtick_enabled_fair() argument
2815 hrtick_enabled_dl(struct rq * rq) hrtick_enabled_dl() argument
2826 hrtick_enabled_fair(struct rq * rq) hrtick_enabled_fair() argument
2831 hrtick_enabled_dl(struct rq * rq) hrtick_enabled_dl() argument
2836 hrtick_enabled(struct rq * rq) hrtick_enabled() argument
3097 nohz_balance_exit_idle(struct rq * rq) nohz_balance_exit_idle() argument
3112 sched_core_account_forceidle(struct rq * rq) sched_core_account_forceidle() argument
3120 sched_core_tick(struct rq * rq) sched_core_tick() argument
3128 sched_core_account_forceidle(struct rq * rq) sched_core_account_forceidle() argument
3130 sched_core_tick(struct rq * rq) sched_core_tick() argument
3205 cpufreq_update_util(struct rq * rq,unsigned int flags) cpufreq_update_util() argument
3215 cpufreq_update_util(struct rq * rq,unsigned int flags) cpufreq_update_util() argument
3250 cpu_bw_dl(struct rq * rq) cpu_bw_dl() argument
3255 cpu_util_dl(struct rq * rq) cpu_util_dl() argument
3264 cpu_util_rt(struct rq * rq) cpu_util_rt() argument
3298 uclamp_rq_get(struct rq * rq,enum uclamp_id clamp_id) uclamp_rq_get() argument
3304 uclamp_rq_set(struct rq * rq,enum uclamp_id clamp_id,unsigned int value) uclamp_rq_set() argument
3310 uclamp_rq_is_idle(struct rq * rq) uclamp_rq_is_idle() argument
3316 uclamp_rq_is_capped(struct rq * rq) uclamp_rq_is_capped() argument
3370 uclamp_rq_is_capped(struct rq * rq) uclamp_rq_is_capped() argument
3380 uclamp_rq_get(struct rq * rq,enum uclamp_id clamp_id) uclamp_rq_get() argument
3389 uclamp_rq_set(struct rq * rq,enum uclamp_id clamp_id,unsigned int value) uclamp_rq_set() argument
3393 uclamp_rq_is_idle(struct rq * rq) uclamp_rq_is_idle() argument
3402 cpu_util_irq(struct rq * rq) cpu_util_irq() argument
3419 cpu_util_irq(struct rq * rq) cpu_util_irq() argument
3461 membarrier_switch_mm(struct rq * rq,struct mm_struct * prev_mm,struct mm_struct * next_mm) membarrier_switch_mm() argument
3479 membarrier_switch_mm(struct rq * rq,struct mm_struct * prev_mm,struct mm_struct * next_mm) membarrier_switch_mm() argument
3647 mm_cid_snapshot_time(struct rq * rq,struct mm_struct * mm) mm_cid_snapshot_time() argument
3655 __mm_cid_get(struct rq * rq,struct task_struct * t,struct mm_struct * mm) __mm_cid_get() argument
3709 mm_cid_get(struct rq * rq,struct task_struct * t,struct mm_struct * mm) mm_cid_get() argument
3734 switch_mm_cid(struct rq * rq,struct task_struct * prev,struct task_struct * next) switch_mm_cid() argument
3786 switch_mm_cid(struct rq * rq,struct task_struct * prev,struct task_struct * next) switch_mm_cid() argument
3789 task_tick_mm_cid(struct rq * rq,struct task_struct * curr) task_tick_mm_cid() argument
3807 task_is_pushable(struct rq * rq,struct task_struct * p,int cpu) task_is_pushable() argument
[all...]
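
Most of the helpers above exist to enforce one locking discipline: a task's runqueue pointer is only stable while that rq's lock is held and pinned (rq_pin_lock()). A minimal sketch of the canonical pattern, using task_rq_lock()/task_rq_unlock() from this header (example_read_rq_clock() is a hypothetical caller):

    static u64 example_read_rq_clock(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;
            u64 now;

            rq = task_rq_lock(p, &rf);   /* rq->lock held, IRQs off, p cannot migrate */
            update_rq_clock(rq);         /* rq_clock() asserts the clock was updated */
            now = rq_clock(rq);
            task_rq_unlock(rq, p, &rf);
            return now;
    }
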
pelt.h
11 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
12 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
13 bool update_other_load_avgs(struct rq *rq);
16 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
18 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
24 update_hw_load_avg(u64 now,struct rq * rq,u64 capacity) update_hw_load_avg() argument
29 hw_load_avg(struct rq * rq) hw_load_avg() argument
39 update_irq_load_avg(struct rq * rq,u64 running) update_irq_load_avg() argument
69 rq_clock_pelt(struct rq * rq) rq_clock_pelt() argument
78 _update_idle_rq_clock_pelt(struct rq * rq) _update_idle_rq_clock_pelt() argument
100 update_rq_clock_pelt(struct rq * rq,s64 delta) update_rq_clock_pelt() argument
138 update_idle_rq_clock_pelt(struct rq * rq) update_idle_rq_clock_pelt() argument
[all...]
deadline.c
71 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
73 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
76 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se) in rq_of_dl_se()
78 struct rq *rq = dl_se->rq; in rq_of_dl_se() local
81 rq = task_rq(dl_task_of(dl_se)); in rq_of_dl_se()
83 return rq; in rq_of_dl_se()
156 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
192 struct rq *r in __dl_update() local
297 dl_rq_change_utilization(struct rq * rq,struct sched_dl_entity * dl_se,u64 new_bw) dl_rq_change_utilization() argument
411 struct rq *rq = rq_of_dl_se(dl_se); task_non_contending() local
540 dl_overloaded(struct rq * rq) dl_overloaded() argument
545 dl_set_overload(struct rq * rq) dl_set_overload() argument
561 dl_clear_overload(struct rq * rq) dl_clear_overload() argument
578 has_pushable_dl_tasks(struct rq * rq) has_pushable_dl_tasks() argument
587 enqueue_pushable_dl_task(struct rq * rq,struct task_struct * p) enqueue_pushable_dl_task() argument
605 dequeue_pushable_dl_task(struct rq * rq,struct task_struct * p) dequeue_pushable_dl_task() argument
628 need_pull_dl_task(struct rq * rq,struct task_struct * prev) need_pull_dl_task() argument
639 deadline_queue_push_tasks(struct rq * rq) deadline_queue_push_tasks() argument
647 deadline_queue_pull_task(struct rq * rq) deadline_queue_pull_task() argument
654 dl_task_offline_migration(struct rq * rq,struct task_struct * p) dl_task_offline_migration() argument
731 replenish_dl_new_period(struct sched_dl_entity * dl_se,struct rq * rq) replenish_dl_new_period() argument
762 struct rq *rq = rq_of_dl_rq(dl_rq); setup_new_dl_entity() local
809 struct rq *rq = rq_of_dl_rq(dl_rq); replenish_dl_entity() local
973 update_dl_revised_wakeup(struct sched_dl_entity * dl_se,struct rq * rq) update_dl_revised_wakeup() argument
1036 struct rq *rq = rq_of_dl_se(dl_se); update_dl_entity() local
1080 struct rq *rq = rq_of_dl_rq(dl_rq); start_dl_timer() local
1135 __push_dl_task(struct rq * rq,struct rq_flags * rf) __push_dl_task() argument
1159 struct rq *rq = rq_of_dl_se(dl_se); dl_server_timer() local
1231 struct rq *rq; dl_task_timer() local
1346 struct rq *rq = rq_of_dl_se(dl_se); dl_check_constrained_dl() local
1381 grub_reclaim(u64 delta,struct rq * rq,struct sched_dl_entity * dl_se) grub_reclaim() argument
1401 dl_scaled_delta_exec(struct rq * rq,struct sched_dl_entity * dl_se,s64 delta_exec) dl_scaled_delta_exec() argument
1429 update_curr_dl_se(struct rq * rq,struct sched_dl_entity * dl_se,s64 delta_exec) update_curr_dl_se() argument
1552 dl_server_update_idle_time(struct rq * rq,struct task_struct * p) dl_server_update_idle_time() argument
1588 struct rq *rq = dl_se->rq; dl_server_start() local
1625 dl_server_init(struct sched_dl_entity * dl_se,struct rq * rq,dl_server_has_tasks_f has_tasks,dl_server_pick_f pick_task) dl_server_init() argument
1637 struct rq *rq; sched_init_dl_servers() local
1660 __dl_server_attach_root(struct sched_dl_entity * dl_se,struct rq * rq) __dl_server_attach_root() argument
1679 struct rq *rq = dl_se->rq; dl_server_apply_params() local
1722 update_curr_dl(struct rq * rq) update_curr_dl() argument
1750 struct rq *rq; inactive_task_timer() local
1813 struct rq *rq = rq_of_dl_rq(dl_rq); inc_dl_deadline() local
1826 struct rq *rq = rq_of_dl_rq(dl_rq); dec_dl_deadline() local
2071 enqueue_task_dl(struct rq * rq,struct task_struct * p,int flags) enqueue_task_dl() argument
2131 dequeue_task_dl(struct rq * rq,struct task_struct * p,int flags) dequeue_task_dl() argument
2155 yield_task_dl(struct rq * rq) yield_task_dl() argument
2176 dl_task_is_earliest_deadline(struct task_struct * p,struct rq * rq) dl_task_is_earliest_deadline() argument
2190 struct rq *rq; select_task_rq_dl() local
2238 struct rq *rq; migrate_task_rq_dl() local
2267 check_preempt_equal_dl(struct rq * rq,struct task_struct * p) check_preempt_equal_dl() argument
2288 balance_dl(struct rq * rq,struct task_struct * p,struct rq_flags * rf) balance_dl() argument
2309 wakeup_preempt_dl(struct rq * rq,struct task_struct * p,int flags) wakeup_preempt_dl() argument
2327 start_hrtick_dl(struct rq * rq,struct sched_dl_entity * dl_se) start_hrtick_dl() argument
2332 start_hrtick_dl(struct rq * rq,struct sched_dl_entity * dl_se) start_hrtick_dl() argument
2337 set_next_task_dl(struct rq * rq,struct task_struct * p,bool first) set_next_task_dl() argument
2375 __pick_task_dl(struct rq * rq) __pick_task_dl() argument
2405 pick_task_dl(struct rq * rq) pick_task_dl() argument
2410 put_prev_task_dl(struct rq * rq,struct task_struct * p,struct task_struct * next) put_prev_task_dl() argument
2437 task_tick_dl(struct rq * rq,struct task_struct * p,int queued) task_tick_dl() argument
2467 pick_earliest_pushable_dl_task(struct rq * rq,int cpu) pick_earliest_pushable_dl_task() argument
2578 find_lock_later_rq(struct task_struct * task,struct rq * rq) find_lock_later_rq() argument
2632 pick_next_pushable_dl_task(struct rq * rq) pick_next_pushable_dl_task() argument
2656 push_dl_task(struct rq * rq) push_dl_task() argument
2729 push_dl_tasks(struct rq * rq) push_dl_tasks() argument
2830 task_woken_dl(struct rq * rq,struct task_struct * p) task_woken_dl() argument
2846 struct rq *rq; set_cpus_allowed_dl() local
2876 rq_online_dl(struct rq * rq) rq_online_dl() argument
2887 rq_offline_dl(struct rq * rq) rq_offline_dl() argument
2908 struct rq *rq; dl_add_task_root_domain() local
2960 switched_from_dl(struct rq * rq,struct task_struct * p) switched_from_dl() argument
3014 switched_to_dl(struct rq * rq,struct task_struct * p) switched_to_dl() argument
3047 prio_changed_dl(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_dl() argument
[all...]
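
The rq_of_dl_rq()/rq_of_dl_se() accessors at the top are the usual container_of() idiom: struct rq embeds a struct dl_rq as its dl member, so a pointer to the member recovers the enclosing runqueue by subtracting the member's offset. Roughly, the line-71 helper expands to:

    struct rq *rq = (struct rq *)((char *)dl_rq - offsetof(struct rq, dl));
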
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *r argument
72 rq_sched_info_arrive(struct rq * rq,unsigned long long delta) rq_sched_info_arrive() argument
73 rq_sched_info_dequeue(struct rq * rq,unsigned long long delta) rq_sched_info_dequeue() argument
74 rq_sched_info_depart(struct rq * rq,unsigned long long delta) rq_sched_info_depart() argument
85 __update_stats_wait_start(rq,p,stats) global() argument
86 __update_stats_wait_end(rq,p,stats) global() argument
87 __update_stats_enqueue_sleeper(rq,p,stats) global() argument
116 psi_account_irqtime(struct rq * rq,struct task_struct * curr,struct task_struct * prev) psi_account_irqtime() argument
205 struct rq *rq; psi_ttwu_dequeue() local
230 psi_account_irqtime(struct rq * rq,struct task_struct * curr,struct task_struct * prev) psi_account_irqtime() argument
241 sched_info_dequeue(struct rq * rq,struct task_struct * t) sched_info_dequeue() argument
263 sched_info_arrive(struct rq * rq,struct task_struct * t) sched_info_arrive() argument
289 sched_info_enqueue(struct rq * rq,struct task_struct * t) sched_info_enqueue() argument
303 sched_info_depart(struct rq * rq,struct task_struct * t) sched_info_depart() argument
319 sched_info_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next) sched_info_switch() argument
334 sched_info_enqueue(rq,t) global() argument
335 sched_info_dequeue(rq,t) global() argument
336 sched_info_switch(rq,t,next) global() argument
[all...]
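
rq_sched_info_arrive() above adds the queueing delay to rq_sched_info.run_delay and bumps rq_sched_info.pcount once per arrival, so the ratio of the two gives a mean wait before running. A hypothetical illustration (not a kernel helper; assumes run_delay is kept in nanoseconds, as elsewhere in sched_info):

    static inline u64 example_avg_run_delay(struct rq *rq)
    {
            struct sched_info *si = &rq->rq_sched_info;

            /* mean delay between becoming runnable and first running */
            return si->pcount ? si->run_delay / si->pcount : 0;
    }
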
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
25 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
30 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
32 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
35 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
44 enqueue_task_stop(struct rq * rq,struct task_struct * p,int flags) enqueue_task_stop() argument
50 dequeue_task_stop(struct rq * rq,struct task_struct * p,int flags) dequeue_task_stop() argument
56 yield_task_stop(struct rq * rq) yield_task_stop() argument
61 put_prev_task_stop(struct rq * rq,struct task_struct * prev,struct task_struct * next) put_prev_task_stop() argument
74 task_tick_stop(struct rq * rq,struct task_struct * curr,int queued) task_tick_stop() argument
78 switched_to_stop(struct rq * rq,struct task_struct * p) switched_to_stop() argument
84 prio_changed_stop(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_stop() argument
89 update_curr_stop(struct rq * rq) update_curr_stop() argument
[all...]
rt.c
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
182 return rt_rq->rq; in rq_of_rt_rq()
191 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
196 return rt_rq->rq; in rq_of_rt_se()
230 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
234 rt_rq->rq = rq; in init_tg_rt_entry()
244 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
304 static inline struct rq *rq_of_rt_r
318 struct rq *rq = rq_of_rt_se(rt_se); rt_rq_of_se() local
333 need_pull_rt_task(struct rq * rq,struct task_struct * prev) need_pull_rt_task() argument
339 rt_overloaded(struct rq * rq) rt_overloaded() argument
344 rt_set_overload(struct rq * rq) rt_set_overload() argument
363 rt_clear_overload(struct rq * rq) rt_clear_overload() argument
373 has_pushable_tasks(struct rq * rq) has_pushable_tasks() argument
384 rt_queue_push_tasks(struct rq * rq) rt_queue_push_tasks() argument
392 rt_queue_pull_task(struct rq * rq) rt_queue_pull_task() argument
397 enqueue_pushable_task(struct rq * rq,struct task_struct * p) enqueue_pushable_task() argument
413 dequeue_pushable_task(struct rq * rq,struct task_struct * p) dequeue_pushable_task() argument
511 for_each_rt_rq(rt_rq,iter,rq) global() argument
530 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
660 __disable_runtime(struct rq * rq) __disable_runtime() argument
742 __enable_runtime(struct rq * rq) __enable_runtime() argument
800 struct rq *rq = rq_of_rt_rq(rt_rq); do_sched_rt_period_timer() local
910 for_each_rt_rq(rt_rq,iter,rq) global() argument
923 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
953 __enable_runtime(struct rq * rq) __enable_runtime() argument
954 __disable_runtime(struct rq * rq) __disable_runtime() argument
974 update_curr_rt(struct rq * rq) update_curr_rt() argument
1013 struct rq *rq = rq_of_rt_rq(rt_rq); dequeue_top_rt_rq() local
1030 struct rq *rq = rq_of_rt_rq(rt_rq); enqueue_top_rt_rq() local
1052 struct rq *rq = rq_of_rt_rq(rt_rq); inc_rt_prio_smp() local
1067 struct rq *rq = rq_of_rt_rq(rt_rq); dec_rt_prio_smp() local
1400 struct rq *rq = rq_of_rt_se(rt_se); enqueue_rt_entity() local
1412 struct rq *rq = rq_of_rt_se(rt_se); dequeue_rt_entity() local
1431 enqueue_task_rt(struct rq * rq,struct task_struct * p,int flags) enqueue_task_rt() argument
1450 dequeue_task_rt(struct rq * rq,struct task_struct * p,int flags) dequeue_task_rt() argument
1480 requeue_task_rt(struct rq * rq,struct task_struct * p,int head) requeue_task_rt() argument
1491 yield_task_rt(struct rq * rq) yield_task_rt() argument
1502 struct rq *rq; select_task_rq_rt() local
1571 check_preempt_equal_prio(struct rq * rq,struct task_struct * p) check_preempt_equal_prio() argument
1594 balance_rt(struct rq * rq,struct task_struct * p,struct rq_flags * rf) balance_rt() argument
1614 wakeup_preempt_rt(struct rq * rq,struct task_struct * p,int flags) wakeup_preempt_rt() argument
1639 set_next_task_rt(struct rq * rq,struct task_struct * p,bool first) set_next_task_rt() argument
1683 _pick_next_task_rt(struct rq * rq) _pick_next_task_rt() argument
1698 pick_task_rt(struct rq * rq) pick_task_rt() argument
1710 put_prev_task_rt(struct rq * rq,struct task_struct * p,struct task_struct * next) put_prev_task_rt() argument
1739 pick_highest_pushable_task(struct rq * rq,int cpu) pick_highest_pushable_task() argument
1848 pick_next_pushable_task(struct rq * rq) pick_next_pushable_task() argument
1870 find_lock_lowest_rq(struct task_struct * task,struct rq * rq) find_lock_lowest_rq() argument
1933 push_rt_task(struct rq * rq,bool pull) push_rt_task() argument
2051 push_rt_tasks(struct rq * rq) push_rt_tasks() argument
2158 tell_cpu_to_push(struct rq * rq) tell_cpu_to_push() argument
2196 struct rq *rq; rto_push_irq_work_func() local
2342 task_woken_rt(struct rq * rq,struct task_struct * p) task_woken_rt() argument
2356 rq_online_rt(struct rq * rq) rq_online_rt() argument
2367 rq_offline_rt(struct rq * rq) rq_offline_rt() argument
2381 switched_from_rt(struct rq * rq,struct task_struct * p) switched_from_rt() argument
2411 switched_to_rt(struct rq * rq,struct task_struct * p) switched_to_rt() argument
2440 prio_changed_rt(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_rt() argument
2471 watchdog(struct rq * rq,struct task_struct * p) watchdog() argument
2495 watchdog(struct rq * rq,struct task_struct * p) watchdog() argument
2506 task_tick_rt(struct rq * rq,struct task_struct * p,int queued) task_tick_rt() argument
2540 get_rr_interval_rt(struct rq * rq,struct task_struct * task) get_rr_interval_rt() argument
[all...]
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq in vnic_rq_alloc_bufs()
53 vnic_rq_free(struct vnic_rq * rq) vnic_rq_free() argument
72 vnic_rq_alloc(struct vnic_dev * vdev,struct vnic_rq * rq,unsigned int index,unsigned int desc_count,unsigned int desc_size) vnic_rq_alloc() argument
101 vnic_rq_init_start(struct vnic_rq * rq,unsigned int cq_index,unsigned int fetch_index,unsigned int posted_index,unsigned int error_interrupt_enable,unsigned int error_interrupt_offset) vnic_rq_init_start() argument
125 vnic_rq_init(struct vnic_rq * rq,unsigned int cq_index,unsigned int error_interrupt_enable,unsigned int error_interrupt_offset) vnic_rq_init() argument
133 vnic_rq_error_status(struct vnic_rq * rq) vnic_rq_error_status() argument
138 vnic_rq_enable(struct vnic_rq * rq) vnic_rq_enable() argument
143 vnic_rq_disable(struct vnic_rq * rq) vnic_rq_disable() argument
171 vnic_rq_clean(struct vnic_rq * rq,void (* buf_clean)(struct vnic_rq * rq,struct vnic_rq_buf * buf)) vnic_rq_clean() argument
172 vnic_rq_clean(struct vnic_rq * rq,void (* buf_clean)(struct vnic_rq * rq,struct vnic_rq_buf * buf)) vnic_rq_clean() argument
[all...]
vnic_rq.h
86 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
89 return rq->ring.desc_avail; in vnic_rq_desc_avail()
92 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
95 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
98 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
100 return rq->to_use->desc; in vnic_rq_next_desc()
103 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
105 return rq->to_use->index; in vnic_rq_next_index()
108 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
143 vnic_rq_return_descs(struct vnic_rq * rq,unsigned int count) vnic_rq_return_descs() argument
153 vnic_rq_service(struct vnic_rq * rq,struct cq_desc * cq_desc,u16 completed_index,int desc_return,void (* buf_service)(struct vnic_rq * rq,struct cq_desc * cq_desc,struct vnic_rq_buf * buf,int skipped,void * opaque),void * opaque) vnic_rq_service() argument
155 vnic_rq_service(struct vnic_rq * rq,struct cq_desc * cq_desc,u16 completed_index,int desc_return,void (* buf_service)(struct vnic_rq * rq,struct cq_desc * cq_desc,struct vnic_rq_buf * buf,int skipped,void * opaque),void * opaque) vnic_rq_service() argument
181 vnic_rq_fill(struct vnic_rq * rq,int (* buf_fill)(struct vnic_rq * rq)) vnic_rq_fill() argument
182 vnic_rq_fill(struct vnic_rq * rq,int (* buf_fill)(struct vnic_rq * rq)) vnic_rq_fill() argument
[all...]
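
Note the -1 in vnic_rq_desc_used() above: the driver keeps one descriptor slot permanently unposted, the usual trick that lets a full ring be distinguished from an empty one by the producer/consumer positions alone. The resulting invariant can be spelled out as (a hypothetical sanity check, not driver code):

    /* used + avail == desc_count - 1 at all times; e.g. a 64-entry
     * ring never reports more than 63 descriptors in use */
    WARN_ON(vnic_rq_desc_used(rq) + vnic_rq_desc_avail(rq) !=
            rq->ring.desc_count - 1);
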
/linux/fs/erofs/
decompressor.c
50 static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq, in z_erofs_lz4_prepare_dstpages() argument
57 EROFS_SB(rq->sb)->lz4.max_distance_pages; in z_erofs_lz4_prepare_dstpages()
62 for (i = j = 0; i < rq->outpages; ++i, ++j) { in z_erofs_lz4_prepare_dstpages()
63 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_dstpages()
70 if (!rq->fillgaps && test_bit(j, bounced)) { in z_erofs_lz4_prepare_dstpages()
73 availables[top++] = rq->out[i - lz4_max_distance_pages]; in z_erofs_lz4_prepare_dstpages()
98 victim = __erofs_allocpage(pagepool, rq->gfp, true); in z_erofs_lz4_prepare_dstpages()
103 rq->out[i] = victim; in z_erofs_lz4_prepare_dstpages()
108 static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq, in z_erofs_lz4_handle_overlap() argument
116 if (rq in z_erofs_lz4_handle_overlap()
178 z_erofs_fixup_insize(struct z_erofs_decompress_req * rq,const char * padbuf,unsigned int padbufsize) z_erofs_fixup_insize() argument
191 z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req * rq,u8 * dst) z_erofs_lz4_decompress_mem() argument
253 z_erofs_lz4_decompress(struct z_erofs_decompress_req * rq,struct page ** pagepool) z_erofs_lz4_decompress() argument
288 z_erofs_transform_plain(struct z_erofs_decompress_req * rq,struct page ** pagepool) z_erofs_transform_plain() argument
343 struct z_erofs_decompress_req *rq = dctx->rq; z_erofs_stream_switch_bufs() local
[all...]
fileio.c
18 struct erofs_fileio_rq *rq; member
23 struct erofs_fileio_rq *rq = in erofs_fileio_ki_complete() local
28 if (ret != rq->bio.bi_iter.bi_size) { in erofs_fileio_ki_complete()
29 bio_advance(&rq->bio, ret); in erofs_fileio_ki_complete()
30 zero_fill_bio(&rq->bio); in erofs_fileio_ki_complete()
34 if (rq->bio.bi_end_io) { in erofs_fileio_ki_complete()
35 if (ret < 0 && !rq->bio.bi_status) in erofs_fileio_ki_complete()
36 rq->bio.bi_status = errno_to_blk_status(ret); in erofs_fileio_ki_complete()
37 rq->bio.bi_end_io(&rq in erofs_fileio_ki_complete()
48 erofs_fileio_rq_submit(struct erofs_fileio_rq * rq) erofs_fileio_rq_submit() argument
73 struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq), erofs_fileio_rq_alloc() local
[all...]
/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
13 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_rcs() argument
42 if (GRAPHICS_VER(rq->i915) == 9) in gen8_emit_flush_rcs()
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
58 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
74 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
79 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_xcs() argument
83 cs = intel_ring_begin(rq, 4); in gen8_emit_flush_xcs()
99 if (rq->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush_xcs()
107 intel_ring_advance(rq, c in gen8_emit_flush_xcs()
112 gen11_emit_flush_rcs(struct i915_request * rq,u32 mode) gen11_emit_flush_rcs() argument
223 mtl_dummy_pipe_control(struct i915_request * rq) mtl_dummy_pipe_control() argument
244 gen12_emit_flush_rcs(struct i915_request * rq,u32 mode) gen12_emit_flush_rcs() argument
362 gen12_emit_flush_xcs(struct i915_request * rq,u32 mode) gen12_emit_flush_xcs() argument
422 hwsp_offset(const struct i915_request * rq) hwsp_offset() argument
434 gen8_emit_init_breadcrumb(struct i915_request * rq) gen8_emit_init_breadcrumb() argument
481 __xehp_emit_bb_start(struct i915_request * rq,u64 offset,u32 len,const unsigned int flags,u32 arb) __xehp_emit_bb_start() argument
522 xehp_emit_bb_start_noarb(struct i915_request * rq,u64 offset,u32 len,const unsigned int flags) xehp_emit_bb_start_noarb() argument
529 xehp_emit_bb_start(struct i915_request * rq,u64 offset,u32 len,const unsigned int flags) xehp_emit_bb_start() argument
536 gen8_emit_bb_start_noarb(struct i915_request * rq,u64 offset,u32 len,const unsigned int flags) gen8_emit_bb_start_noarb() argument
572 gen8_emit_bb_start(struct i915_request * rq,u64 offset,u32 len,const unsigned int flags) gen8_emit_bb_start() argument
600 assert_request_valid(struct i915_request * rq) assert_request_valid() argument
613 gen8_emit_wa_tail(struct i915_request * rq,u32 * cs) gen8_emit_wa_tail() argument
626 emit_preempt_busywait(struct i915_request * rq,u32 * cs) emit_preempt_busywait() argument
642 gen8_emit_fini_breadcrumb_tail(struct i915_request * rq,u32 * cs) gen8_emit_fini_breadcrumb_tail() argument
657 emit_xcs_breadcrumb(struct i915_request * rq,u32 * cs) emit_xcs_breadcrumb() argument
662 gen8_emit_fini_breadcrumb_xcs(struct i915_request * rq,u32 * cs) gen8_emit_fini_breadcrumb_xcs() argument
667 gen8_emit_fini_breadcrumb_rcs(struct i915_request * rq,u32 * cs) gen8_emit_fini_breadcrumb_rcs() argument
687 gen11_emit_fini_breadcrumb_rcs(struct i915_request * rq,u32 * cs) gen11_emit_fini_breadcrumb_rcs() argument
727 gen12_emit_preempt_busywait(struct i915_request * rq,u32 * cs) gen12_emit_preempt_busywait() argument
746 hold_switchout_semaphore_offset(struct i915_request * rq) hold_switchout_semaphore_offset() argument
755 hold_switchout_emit_wa_busywait(struct i915_request * rq,u32 * cs) hold_switchout_emit_wa_busywait() argument
784 gen12_emit_fini_breadcrumb_tail(struct i915_request * rq,u32 * cs) gen12_emit_fini_breadcrumb_tail() argument
805 gen12_emit_fini_breadcrumb_xcs(struct i915_request * rq,u32 * cs) gen12_emit_fini_breadcrumb_xcs() argument
812 gen12_emit_fini_breadcrumb_rcs(struct i915_request * rq,u32 * cs) gen12_emit_fini_breadcrumb_rcs() argument
[all...]
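
Every emitter above follows the same intel_ring_begin()/intel_ring_advance() protocol: reserve a known number of dwords in the request's ring, write exactly that many, then advance. A minimal sketch of the pattern (example_emit_noops() is hypothetical; real emitters write flush or batch-buffer commands instead of MI_NOOP):

    static int example_emit_noops(struct i915_request *rq)
    {
            u32 *cs;

            cs = intel_ring_begin(rq, 2);   /* reserve two dwords or fail */
            if (IS_ERR(cs))
                    return PTR_ERR(cs);

            *cs++ = MI_NOOP;
            *cs++ = MI_NOOP;

            intel_ring_advance(rq, cs);     /* cs must land exactly at the end of the reservation */
            return 0;
    }
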
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, in gen6_emit_flush_rcs()
143 gen6_emit_breadcrumb_rcs(struct i915_request * rq,u32 * cs) gen6_emit_breadcrumb_rcs() argument
178 mi_flush_dw(struct i915_request * rq,u32 flags) mi_flush_dw() argument
214 gen6_flush_dw(struct i915_request * rq,u32 mode,u32 invflags) gen6_flush_dw() argument
219 gen6_emit_flush_xcs(struct i915_request * rq,u32 mode) gen6_emit_flush_xcs() argument
224 gen6_emit_flush_vcs(struct i915_request * rq,u32 mode) gen6_emit_flush_vcs() argument
229 gen6_emit_bb_start(struct i915_request * rq,u64 offset,u32 len,unsigned int dispatch_flags) gen6_emit_bb_start() argument
251 hsw_emit_bb_start(struct i915_request * rq,u64 offset,u32 len,unsigned int dispatch_flags) hsw_emit_bb_start() argument
272 gen7_stall_cs(struct i915_request * rq) gen7_stall_cs() argument
289 gen7_emit_flush_rcs(struct i915_request * rq,u32 mode) gen7_emit_flush_rcs() argument
353 gen7_emit_breadcrumb_rcs(struct i915_request * rq,u32 * cs) gen7_emit_breadcrumb_rcs() argument
375 gen6_emit_breadcrumb_xcs(struct i915_request * rq,u32 * cs) gen6_emit_breadcrumb_xcs() argument
393 gen7_emit_breadcrumb_xcs(struct i915_request * rq,u32 * cs) gen7_emit_breadcrumb_xcs() argument
[all...]
intel_breadcrumbs.c
111 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
113 if (rq->context != ce) in check_signal_order()
116 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
117 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
118 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
121 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
122 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
123 rq->fence.seqno)) in check_signal_order()
212 struct i915_request *rq; in signal_irq_work() local
214 list_for_each_entry_rcu(rq, in signal_irq_work()
250 struct i915_request *rq = signal_irq_work() local
337 irq_signal_request(struct i915_request * rq,struct intel_breadcrumbs * b) irq_signal_request() argument
348 insert_breadcrumb(struct i915_request * rq) insert_breadcrumb() argument
410 i915_request_enable_breadcrumb(struct i915_request * rq) i915_request_enable_breadcrumb() argument
435 i915_request_cancel_breadcrumb(struct i915_request * rq) i915_request_cancel_breadcrumb() argument
462 struct i915_request *rq, *rn; intel_context_remove_breadcrumbs() local
495 struct i915_request *rq; print_signals() local
[all...]
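
check_signal_order() above keeps ce->signals sorted by comparing fence seqnos with i915_seqno_passed(), which has to stay correct when the 32-bit counter wraps. The standard idiom, sketched here on the assumption that it matches the kernel's definition:

    static inline bool seqno_passed(u32 seq1, u32 seq2)
    {
            /* signed difference survives u32 wraparound */
            return (s32)(seq1 - seq2) >= 0;   /* true iff seq1 is at or after seq2 */
    }
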
selftest_execlists.c
28 static bool is_active(struct i915_request *rq) in is_active() argument
30 if (i915_request_is_active(rq)) in is_active()
33 if (i915_request_on_hold(rq)) in is_active()
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
43 struct i915_request *rq, in wait_for_submit() argument
53 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
69 struct i915_request *rq, in wait_for_reset() argument
81 if (i915_request_completed(rq)) in wait_for_reset()
125 struct i915_request *rq; live_sanitycheck() local
182 struct i915_request *rq[2]; live_unlite_restore() local
344 struct i915_request *rq; live_unlite_ring() local
493 struct i915_request *rq; live_pin_rewind() local
601 struct i915_request *rq; live_hold_reset() local
720 struct i915_request *rq; live_error_interrupt() local
820 emit_semaphore_chain(struct i915_request * rq,struct i915_vma * vma,int idx) emit_semaphore_chain() argument
860 struct i915_request *rq; semaphore_queue() local
895 struct i915_request *rq; release_queue() local
946 struct i915_request *rq; slice_semaphore_queue() local
1056 struct i915_request *rq; create_rewinder() local
1130 struct i915_request *rq[3] = {}; live_timeslice_rewind() local
1259 struct i915_request *rq; nop_request() local
1330 struct i915_request *rq, *nop; live_timeslice_queue() local
1430 struct i915_request *rq; live_timeslice_nopreempt() local
1721 struct i915_request *rq; spinner_create_request() local
1759 struct i915_request *rq; live_preempt() local
1852 struct i915_request *rq; live_late_preempt() local
2058 struct i915_request *rq; __cancel_active0() local
2102 struct i915_request *rq[2] = {}; __cancel_active1() local
2173 struct i915_request *rq[3] = {}; __cancel_queued() local
2268 struct i915_request *rq; __cancel_hostile() local
2325 struct i915_request *rq; __cancel_fail() local
2576 struct i915_request *rq; live_chain_preempt() local
2706 struct i915_request *rq; create_gang() local
2801 struct i915_request *rq; __live_preempt_ring() local
2975 struct i915_request *rq = NULL; live_preempt_gang() local
3153 struct i915_request *rq; create_gpr_client() local
3214 struct i915_request *rq; preempt_user() local
3299 struct i915_request *rq; live_preempt_user() local
3398 struct i915_request *rq; live_preempt_timeout() local
3490 struct i915_request *rq; smoke_submit() local
3754 struct i915_request *rq; nop_virtual_engine() local
3771 struct i915_request *rq; nop_virtual_engine() local
4027 struct i915_request *rq; slicein_virtual_engine() local
4094 struct i915_request *rq; sliceout_virtual_engine() local
4226 struct i915_request *rq; preserved_virtual_engine() local
4337 struct i915_request *rq; reset_virtual_engine() local
[all...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
19 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_xsk_alloc_rx_mpwqe() argument
21 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); in mlx5e_xsk_alloc_rx_mpwqe()
22 struct mlx5e_icosq *icosq = rq->icosq; in mlx5e_xsk_alloc_rx_mpwqe()
30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
35 batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs, in mlx5e_xsk_alloc_rx_mpwqe()
36 rq->mpwqe.pages_per_wqe); in mlx5e_xsk_alloc_rx_mpwqe()
44 for (; batch < rq->mpwqe.pages_per_wqe; batch++) { in mlx5e_xsk_alloc_rx_mpwqe()
45 xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_mpwqe()
50 pi = mlx5e_icosq_get_next_pi(icosq, rq in mlx5e_xsk_alloc_rx_mpwqe()
160 mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_xsk_alloc_rx_wqes_batched() argument
198 mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_xsk_alloc_rx_wqes() argument
225 mlx5e_xsk_construct_skb(struct mlx5e_rq * rq,struct xdp_buff * xdp) mlx5e_xsk_construct_skb() argument
247 mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi,struct mlx5_cqe64 * cqe,u16 cqe_bcnt,u32 head_offset,u32 page_idx) mlx5e_xsk_skb_from_cqe_mpwrq_linear() argument
304 mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * wi,struct mlx5_cqe64 * cqe,u32 cqe_bcnt) mlx5e_xsk_skb_from_cqe_linear() argument
[all...]
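
mlx5e_xsk_alloc_rx_mpwqe() above shows the common XSK allocation shape: try xsk_buff_alloc_batch() first, then top up one buffer at a time with xsk_buff_alloc() when the batch comes back short. Extracted into a stand-alone sketch (example_fill() is hypothetical):

    static int example_fill(struct xsk_buff_pool *pool,
                            struct xdp_buff **bufs, u32 need)
    {
            u32 batch = xsk_buff_alloc_batch(pool, bufs, need);   /* may return < need */

            for (; batch < need; batch++) {
                    bufs[batch] = xsk_buff_alloc(pool);
                    if (!bufs[batch]) {
                            while (batch--)
                                    xsk_buff_free(bufs[batch]);   /* unwind on failure */
                            return -ENOMEM;
                    }
            }
            return 0;
    }
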
/linux/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
160 struct esas2r_request *rq = &a->general_req; esas2r_disc_check_for_work() local
313 struct esas2r_request *rq = &a->general_req; esas2r_disc_start_port() local
387 esas2r_disc_continue(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_continue() argument
460 esas2r_disc_start_request(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_start_request() argument
489 esas2r_disc_local_start_request(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_local_start_request() argument
503 esas2r_disc_abort(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_abort() argument
518 esas2r_disc_block_dev_scan(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_block_dev_scan() argument
549 esas2r_disc_block_dev_scan_cb(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_block_dev_scan_cb() argument
578 esas2r_disc_raid_grp_info(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_raid_grp_info() argument
625 esas2r_disc_raid_grp_info_cb(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_raid_grp_info_cb() argument
688 esas2r_disc_part_info(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_part_info() argument
740 esas2r_disc_part_info_cb(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_part_info_cb() argument
789 esas2r_disc_passthru_dev_info(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_passthru_dev_info() argument
826 esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_passthru_dev_info_cb() argument
882 esas2r_disc_passthru_dev_addr(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_passthru_dev_addr() argument
940 esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_passthru_dev_addr_cb() argument
1045 esas2r_disc_dev_remove(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_dev_remove() argument
1084 esas2r_disc_dev_add(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_disc_dev_add() argument
1163 struct esas2r_request *rq; esas2r_disc_fix_curr_requests() local
[all...]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq in esas2r_process_vda_ioctl()
270 esas2r_complete_vda_ioctl(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_complete_vda_ioctl() argument
347 esas2r_build_flash_req(struct esas2r_adapter * a,struct esas2r_request * rq,u8 sub_func,u8 cksum,u32 addr,u32 length) esas2r_build_flash_req() argument
373 esas2r_build_mgt_req(struct esas2r_adapter * a,struct esas2r_request * rq,u8 sub_func,u8 scan_gen,u16 dev_index,u32 length,void * data) esas2r_build_mgt_req() argument
420 esas2r_build_ae_req(struct esas2r_adapter * a,struct esas2r_request * rq) esas2r_build_ae_req() argument
449 esas2r_build_ioctl_req(struct esas2r_adapter * a,struct esas2r_request * rq,u32 length,u8 sub_func) esas2r_build_ioctl_req() argument
466 esas2r_build_cfg_req(struct esas2r_adapter * a,struct esas2r_request * rq,u8 sub_func,u32 length,void * data) esas2r_build_cfg_req() argument
487 clear_vda_request(struct esas2r_request * rq) clear_vda_request() argument
[all...]
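Lines 93–115 above show how a VDA IOCTL request is assembled: reset the request, select the VDA function, arm the completion callback and context, then fill sub-command fields with device-endian (little-endian) values. A condensed restatement wrapped in a hypothetical helper; the atto_ioctl_vda type and the vrq layout beyond the fields visible here are assumptions:

/* Sketch only: mirrors the visible steps of esas2r_process_vda_ioctl()
 * for the flash sub-command; would live inside the driver where
 * esas2r.h is already included.
 */
static void my_setup_vda_flash(struct esas2r_request *rq,
                               struct atto_ioctl_vda *vi, u32 datalen)
{
        clear_vda_request(rq);                        /* reset VDA request  */
        rq->vrq->scsi.function = vi->function;        /* select VDA function */
        rq->interrupt_cb = esas2r_complete_vda_ioctl; /* completion handler */
        rq->interrupt_cx = vi;                        /* context for the cb */

        /* lengths travel to the device in little-endian */
        rq->vrq->flash.length = cpu_to_le32(datalen);
        rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
}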
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, in mlx5e_read_enhanced_title_slot() argument
92 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_enhanced_title_slot()
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state))) in mlx5e_read_enhanced_title_slot()
100 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_read_enhanced_title_slot()
105 mlx5_wq_cyc_ctr2ix(&rq in mlx5e_read_enhanced_title_slot()
108 mlx5e_read_title_slot(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc) mlx5e_read_title_slot() argument
153 mlx5e_decompress_cqe(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc) mlx5e_decompress_cqe() argument
183 mlx5e_decompress_cqe_no_hash(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,u32 cqcc) mlx5e_decompress_cqe_no_hash() argument
194 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,struct mlx5_cqe64 * cqe,int budget_rem) mlx5e_decompress_enhanced_cqe() argument
226 mlx5e_decompress_cqes_cont(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,int update_owner_only,int budget_rem) mlx5e_decompress_cqes_cont() argument
256 mlx5e_decompress_cqes_start(struct mlx5e_rq * rq,struct mlx5_cqwq * wq,int budget_rem) mlx5e_decompress_cqes_start() argument
304 mlx5e_get_rx_frag(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * frag) mlx5e_get_rx_frag() argument
331 mlx5e_put_rx_frag(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * frag) mlx5e_put_rx_frag() argument
338 get_frag(struct mlx5e_rq * rq,u16 ix) get_frag() argument
343 mlx5e_alloc_rx_wqe(struct mlx5e_rq * rq,struct mlx5e_rx_wqe_cyc * wqe,u16 ix) mlx5e_alloc_rx_wqe() argument
374 mlx5e_free_rx_wqe(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * wi) mlx5e_free_rx_wqe() argument
389 mlx5e_dealloc_rx_wqe(struct mlx5e_rq * rq,u16 ix) mlx5e_dealloc_rx_wqe() argument
407 mlx5e_xsk_free_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_xsk_free_rx_wqes() argument
425 mlx5e_free_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_free_rx_wqes() argument
439 mlx5e_alloc_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_alloc_rx_wqes() argument
457 mlx5e_refill_rx_wqes(struct mlx5e_rq * rq,u16 ix,int wqe_bulk) mlx5e_refill_rx_wqes() argument
498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq * rq,struct skb_shared_info * sinfo,struct xdp_buff * xdp,struct mlx5e_frag_page * frag_page,u32 frag_offset,u32 len) mlx5e_add_skb_shared_info_frag() argument
526 mlx5e_add_skb_frag(struct mlx5e_rq * rq,struct sk_buff * skb,struct mlx5e_frag_page * frag_page,u32 frag_offset,u32 len,unsigned int truesize) mlx5e_add_skb_frag() argument
549 mlx5e_copy_skb_header(struct mlx5e_rq * rq,struct sk_buff * skb,netmem_ref netmem,dma_addr_t addr,int offset_from,int dma_offset,u32 headlen) mlx5e_copy_skb_header() argument
563 mlx5e_free_rx_mpwqe(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi) mlx5e_free_rx_mpwqe() argument
597 mlx5e_post_rx_mpwqe(struct mlx5e_rq * rq,u8 n) mlx5e_post_rx_mpwqe() argument
650 mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq * rq,int header_index) mlx5e_shampo_hd_to_frag_page() argument
665 mlx5e_build_shampo_hd_umr(struct mlx5e_rq * rq,struct mlx5e_icosq * sq,u16 ksm_entries,u16 index) mlx5e_build_shampo_hd_umr() argument
730 mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq * rq) mlx5e_alloc_rx_hd_mpwqe() argument
771 mlx5e_alloc_rx_mpwqe(struct mlx5e_rq * rq,u16 ix) mlx5e_alloc_rx_mpwqe() argument
856 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq * rq,u16 header_index) mlx5e_free_rx_shampo_hd_entry() argument
868 mlx5e_shampo_dealloc_hd(struct mlx5e_rq * rq) mlx5e_shampo_dealloc_hd() argument
877 mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq * rq,u16 ix) mlx5e_dealloc_rx_mpwqe() argument
889 mlx5e_post_rx_wqes(struct mlx5e_rq * rq) mlx5e_post_rx_wqes() argument
969 mlx5e_shampo_fill_umr(struct mlx5e_rq * rq,int len) mlx5e_shampo_fill_umr() argument
991 struct mlx5e_rq *rq = &c->rq; mlx5e_handle_shampo_hd_umr() local
1080 mlx5e_post_rx_mpwqes(struct mlx5e_rq * rq) mlx5e_post_rx_mpwqes() argument
1220 mlx5e_shampo_get_packet_hd(struct mlx5e_rq * rq,u16 header_index) mlx5e_shampo_get_packet_hd() argument
1228 mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq * rq,struct iphdr * ipv4) mlx5e_shampo_update_ipv4_udp_hdr() argument
1247 mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq * rq,struct ipv6hdr * ipv6) mlx5e_shampo_update_ipv6_udp_hdr() argument
1266 mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,struct tcphdr * skb_tcp_hd) mlx5e_shampo_update_fin_psh_flags() argument
1278 mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq * rq,struct iphdr * ipv4,struct mlx5_cqe64 * cqe,bool match) mlx5e_shampo_update_ipv4_tcp_hdr() argument
1302 mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq * rq,struct ipv6hdr * ipv6,struct mlx5_cqe64 * cqe,bool match) mlx5e_shampo_update_ipv6_tcp_hdr() argument
1323 mlx5e_shampo_update_hdr(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,bool match) mlx5e_shampo_update_hdr() argument
1381 mlx5e_enable_ecn(struct mlx5e_rq * rq,struct sk_buff * skb) mlx5e_enable_ecn() argument
1483 mlx5e_handle_csum(struct net_device * netdev,struct mlx5_cqe64 * cqe,struct mlx5e_rq * rq,struct sk_buff * skb,bool lro) mlx5e_handle_csum() argument
1554 mlx5e_build_rx_skb(struct mlx5_cqe64 * cqe,u32 cqe_bcnt,struct mlx5e_rq * rq,struct sk_buff * skb) mlx5e_build_rx_skb() argument
1613 mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,u32 cqe_bcnt,struct sk_buff * skb) mlx5e_shampo_complete_rx_cqe() argument
1632 mlx5e_complete_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,u32 cqe_bcnt,struct sk_buff * skb) mlx5e_complete_rx_cqe() argument
1645 mlx5e_build_linear_skb(struct mlx5e_rq * rq,void * va,u32 frag_size,u16 headroom,u32 cqe_bcnt,u32 metasize) mlx5e_build_linear_skb() argument
1665 mlx5e_fill_mxbuf(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,void * va,u16 headroom,u32 frame_sz,u32 len,struct mlx5e_xdp_buff * mxbuf) mlx5e_fill_mxbuf() argument
1676 mlx5e_skb_from_cqe_linear(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * wi,struct mlx5_cqe64 * cqe,u32 cqe_bcnt) mlx5e_skb_from_cqe_linear() argument
1724 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq * rq,struct mlx5e_wqe_frag_info * wi,struct mlx5_cqe64 * cqe,u32 cqe_bcnt) mlx5e_skb_from_cqe_nonlinear() argument
1811 trigger_report(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) trigger_report() argument
1823 mlx5e_handle_rx_err_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_err_cqe() argument
1829 mlx5e_handle_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_cqe() argument
1873 mlx5e_handle_rx_cqe_rep(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_cqe_rep() argument
1916 mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_cqe_mpwrq_rep() argument
1974 mlx5e_shampo_fill_skb_data(struct sk_buff * skb,struct mlx5e_rq * rq,struct mlx5e_frag_page * frag_page,u32 data_bcnt,u32 data_offset) mlx5e_shampo_fill_skb_data() argument
1995 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi,struct mlx5_cqe64 * cqe,u16 cqe_bcnt,u32 head_offset,u32 page_idx) mlx5e_skb_from_cqe_mpwrq_nonlinear() argument
2148 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi,struct mlx5_cqe64 * cqe,u16 cqe_bcnt,u32 head_offset,u32 page_idx) mlx5e_skb_from_cqe_mpwrq_linear() argument
2206 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq * rq,struct mlx5e_mpw_info * wi,struct mlx5_cqe64 * cqe,u16 header_index) mlx5e_skb_from_cqe_shampo() argument
2273 mlx5e_shampo_flush_skb(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,bool match) mlx5e_shampo_flush_skb() argument
2302 mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_cqe_mpwrq_shampo() argument
2409 mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_handle_rx_cqe_mpwrq() argument
2468 mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq * rq,struct mlx5_cqwq * cqwq,int budget_rem) mlx5e_rx_cq_process_enhanced_cqe_comp() argument
2517 mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq * rq,struct mlx5_cqwq * cqwq,int budget_rem) mlx5e_rx_cq_process_basic_cqe_comp() argument
2547 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); mlx5e_poll_rx_cq() local
2584 mlx5i_complete_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe,u32 cqe_bcnt,struct sk_buff * skb) mlx5i_complete_rx_cqe() argument
2669 mlx5i_handle_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5i_handle_rx_cqe() argument
2710 mlx5e_rq_set_handlers(struct mlx5e_rq * rq,struct mlx5e_params * params,bool xsk) mlx5e_rq_set_handlers() argument
2759 mlx5e_trap_handle_rx_cqe(struct mlx5e_rq * rq,struct mlx5_cqe64 * cqe) mlx5e_trap_handle_rx_cqe() argument
2793 mlx5e_rq_set_trap_handlers(struct mlx5e_rq * rq,struct mlx5e_params * params) mlx5e_rq_set_trap_handlers() argument
[all...]
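Among the en_rx.c entries, mlx5e_build_linear_skb() reflects a common driver pattern: wrap an already-DMA-mapped buffer in an skb with build_skb(), then reserve the headroom and mark the received length. A generic sketch under that reading — parameter names mirror the listing, but this is not mlx5e's actual body:

/* Plausible shape of a linear-skb build: va points at the start of a
 * driver-owned buffer, frag_size is its full size (headroom included),
 * cqe_bcnt is the byte count reported by the completion entry.
 */
#include <linux/skbuff.h>

static struct sk_buff *build_linear_skb(void *va, u32 frag_size,
                                        u16 headroom, u32 cqe_bcnt,
                                        u8 metasize)
{
        struct sk_buff *skb = build_skb(va, frag_size);

        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, headroom);     /* skip headroom before packet data */
        skb_put(skb, cqe_bcnt);         /* expose the received bytes        */
        if (metasize)
                skb_metadata_set(skb, metasize); /* XDP metadata, if any */
        return skb;
}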
/linux/block/
blk-flush.c
103 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
105 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
108 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
111 * After flush data completion, @rq->bio is %NULL but we need to in blk_flush_restore_request()
112 * complete the bio again. @rq->biotail is guaranteed to equal the in blk_flush_restore_request()
113 * original @rq->bio. Restore it. in blk_flush_restore_request()
115 rq->bio = rq->biotail; in blk_flush_restore_request()
116 if (rq->bio) in blk_flush_restore_request()
117 rq in blk_flush_restore_request()
124 blk_account_io_flush(struct request * rq) blk_account_io_flush() argument
148 blk_flush_complete_seq(struct request * rq,struct blk_flush_queue * fq,unsigned int seq,blk_status_t error) blk_flush_complete_seq() argument
206 struct request *rq, *n; flush_end_io() local
257 is_flush_rq(struct request * rq) is_flush_rq() argument
337 mq_flush_data_end_io(struct request * rq,blk_status_t error) mq_flush_data_end_io() argument
369 blk_rq_init_flush(struct request * rq) blk_rq_init_flush() argument
382 blk_insert_flush(struct request * rq) blk_insert_flush() argument
[all...]
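The `1 << ffz(rq->flush.seq)` in blk_flush_cur_seq() reads naturally once you know flush.seq accumulates one bit per completed flush stage, so find-first-zero names the next pending stage. A worked walk-through using the REQ_FSEQ_* stage bits as defined in blk-flush.c:

/* Stage bits from blk-flush.c: */
enum {
        REQ_FSEQ_PREFLUSH  = (1 << 0),  /* pre-flushing in progress  */
        REQ_FSEQ_DATA      = (1 << 1),  /* data write in progress    */
        REQ_FSEQ_POSTFLUSH = (1 << 2),  /* post-flushing in progress */
        REQ_FSEQ_DONE      = (1 << 3),
};
/*
 * blk_flush_cur_seq() then steps through the sequence:
 *   seq == 0x0 -> 1 << ffz(0x0) == REQ_FSEQ_PREFLUSH
 *   seq == 0x1 -> 1 << ffz(0x1) == REQ_FSEQ_DATA
 *   seq == 0x3 -> 1 << ffz(0x3) == REQ_FSEQ_POSTFLUSH
 *   seq == 0x7 -> 1 << ffz(0x7) == REQ_FSEQ_DONE
 */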
blk-mq.c
42 #include "blk-rq-qos.h"
48 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
49 static void blk_mq_request_bypass_insert(struct request *rq,
92 static bool blk_mq_check_in_driver(struct request *rq, void *priv) in blk_mq_check_in_driver() argument
96 if (rq->rq_flags & RQF_IO_STAT && in blk_mq_check_in_driver()
97 (!bdev_is_partition(mi->part) || rq->part == mi->part) && in blk_mq_check_in_driver()
98 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_in_driver()
99 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_in_driver()
372 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
374 memset(rq, in blk_rq_init()
389 blk_mq_rq_time_init(struct request * rq,u64 alloc_time_ns) blk_mq_rq_time_init() argument
405 struct request *rq = tags->static_rqs[tag]; blk_mq_rq_ctx_init() local
457 struct request *rq; __blk_mq_alloc_requests_batch() local
489 struct request *rq; __blk_mq_alloc_requests() local
585 struct request *rq; blk_mq_rq_cache_fill() local
603 struct request *rq; blk_mq_alloc_cached_request() local
636 struct request *rq; blk_mq_alloc_request() local
686 struct request *rq; blk_mq_alloc_request_hctx() local
752 blk_mq_finish_request(struct request * rq) blk_mq_finish_request() argument
769 __blk_mq_free_request(struct request * rq) __blk_mq_free_request() argument
790 blk_mq_free_request(struct request * rq) blk_mq_free_request() argument
809 struct request *rq; blk_mq_free_plug_rqs() local
815 blk_dump_rq_flags(struct request * rq,char * msg) blk_dump_rq_flags() argument
1123 __blk_mq_end_request_acct(struct request * rq,u64 now) __blk_mq_end_request_acct() argument
1132 __blk_mq_end_request(struct request * rq,blk_status_t error) __blk_mq_end_request() argument
1149 blk_mq_end_request(struct request * rq,blk_status_t error) blk_mq_end_request() argument
1174 struct request *rq; blk_mq_end_request_batch() local
1223 struct request *rq, *next; blk_complete_reqs() local
1245 blk_mq_complete_need_ipi(struct request * rq) blk_mq_complete_need_ipi() argument
1272 blk_mq_complete_send_ipi(struct request * rq) blk_mq_complete_send_ipi() argument
1281 blk_mq_raise_softirq(struct request * rq) blk_mq_raise_softirq() argument
1292 blk_mq_complete_request_remote(struct request * rq) blk_mq_complete_request_remote() argument
1326 blk_mq_complete_request(struct request * rq) blk_mq_complete_request() argument
1341 blk_mq_start_request(struct request * rq) blk_mq_start_request() argument
1381 blk_add_rq_to_plug(struct blk_plug * plug,struct request * rq) blk_add_rq_to_plug() argument
1419 blk_execute_rq_nowait(struct request * rq,bool at_head) blk_execute_rq_nowait() argument
1443 blk_end_sync_rq(struct request * rq,blk_status_t ret) blk_end_sync_rq() argument
1452 blk_rq_is_poll(struct request * rq) blk_rq_is_poll() argument
1462 blk_rq_poll_completion(struct request * rq,struct completion * wait) blk_rq_poll_completion() argument
1480 blk_execute_rq(struct request * rq,bool at_head) blk_execute_rq() argument
1506 __blk_mq_requeue_request(struct request * rq) __blk_mq_requeue_request() argument
1521 blk_mq_requeue_request(struct request * rq,bool kick_requeue_list) blk_mq_requeue_request() argument
1546 struct request *rq; blk_mq_requeue_work() local
1591 blk_is_flush_data_rq(struct request * rq) blk_is_flush_data_rq() argument
1596 blk_mq_rq_inflight(struct request * rq,void * priv) blk_mq_rq_inflight() argument
1650 blk_mq_req_expired(struct request * rq,struct blk_expired_data * expired) blk_mq_req_expired() argument
1670 blk_mq_put_rq_ref(struct request * rq) blk_mq_put_rq_ref() argument
1680 blk_mq_check_expired(struct request * rq,void * priv) blk_mq_check_expired() argument
1698 blk_mq_handle_expired(struct request * rq,void * priv) blk_mq_handle_expired() argument
1801 struct request *rq; global() member
1839 __blk_mq_alloc_driver_tag(struct request * rq) __blk_mq_alloc_driver_tag() argument
1892 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_mark_tag_wait() argument
2004 blk_mq_handle_dev_resource(struct request * rq,struct list_head * list) blk_mq_handle_dev_resource() argument
2017 blk_mq_prep_dispatch_rq(struct request * rq,bool need_budget) blk_mq_prep_dispatch_rq() argument
2058 struct request *rq; blk_mq_release_budgets() local
2093 struct request *rq; blk_mq_dispatch_rq_list() local
2546 blk_mq_request_bypass_insert(struct request * rq,blk_insert_t flags) blk_mq_request_bypass_insert() argument
2562 struct request *rq; blk_mq_insert_requests() local
2595 blk_mq_insert_request(struct request * rq,blk_insert_t flags) blk_mq_insert_request() argument
2657 blk_mq_bio_to_request(struct request * rq,struct bio * bio,unsigned int nr_segs) blk_mq_bio_to_request() argument
2681 __blk_mq_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq,bool last) __blk_mq_issue_directly() argument
2713 blk_mq_get_budget_and_tag(struct request * rq) blk_mq_get_budget_and_tag() argument
2739 blk_mq_try_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_try_issue_directly() argument
2770 blk_mq_request_issue_directly(struct request * rq,bool last) blk_mq_request_issue_directly() argument
2788 struct request *rq; blk_mq_issue_direct() local
2834 struct request *rq = rq_list_pop(rqs); blk_mq_extract_queue_requests() local
2891 struct request *rq = rq_list_pop(rqs); blk_mq_dispatch_list() local
2978 struct request *rq = list_first_entry(list, struct request, blk_mq_try_issue_list_directly() local
3031 struct request *rq; blk_mq_get_new_requests() local
3054 struct request *rq; blk_mq_peek_cached_request() local
3069 blk_mq_use_cached_rq(struct request * rq,struct blk_plug * plug,struct bio * bio) blk_mq_use_cached_rq() argument
3118 struct request *rq; blk_mq_submit_bio() local
3240 blk_insert_cloned_request(struct request * rq) blk_insert_cloned_request() argument
3306 blk_rq_unprep_clone(struct request * rq) blk_rq_unprep_clone() argument
3335 blk_rq_prep_clone(struct request * rq,struct request * rq_src,struct bio_set * bs,gfp_t gfp_mask,int (* bio_ctr)(struct bio *,struct bio *,void *),void * data) blk_rq_prep_clone() argument
3391 blk_steal_bios(struct bio_list * list,struct request * rq) blk_steal_bios() argument
3433 struct request *rq = drv_tags->rqs[i]; blk_mq_clear_rq_mapping() local
3471 struct request *rq = tags->static_rqs[i]; blk_mq_free_rqs() local
3567 blk_mq_init_request(struct blk_mq_tag_set * set,struct request * rq,unsigned int hctx_idx,int node) blk_mq_init_request() argument
3640 struct request *rq = p; blk_mq_alloc_rqs() local
3664 blk_mq_has_request(struct request * rq,void * data) blk_mq_has_request() argument
5160 blk_rq_poll(struct request * rq,struct io_comp_batch * iob,unsigned int poll_flags) blk_rq_poll() argument
5178 blk_mq_rq_cpu(struct request * rq) blk_mq_rq_cpu() argument
[all...]
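blk-mq.c implements the request lifecycle these entries trace: allocate, start, complete, end. A minimal sketch of a ->queue_rq() handler that completes inline, using only helpers shown in the listings above; demo_queue_rq is a hypothetical driver function, and real hardware drivers would instead complete asynchronously via blk_mq_complete_request():

/* Minimal inline-completion ->queue_rq(), in the style of a trivial
 * memory-backed driver.
 */
#include <linux/blk-mq.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);   /* MQ_RQ_IN_FLIGHT; arms the timeout */

        /* ...service the request here (copy data, program hardware)... */

        blk_mq_end_request(rq, BLK_STS_OK); /* inline completion */
        return BLK_STS_OK;
}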
/linux/include/linux/
blk-mq.h
142 * rq sectors used for blk stats. It has the same value
143 * with blk_rq_sectors(rq), except that it never be zeroed
216 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough() argument
218 return blk_op_is_passthrough(rq->cmd_flags); in blk_rq_is_passthrough()
228 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
230 #define rq_dma_dir(rq) \ argument
231 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
244 static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) in rq_list_add_tail() argument
246 rq in rq_list_add_tail()
254 rq_list_add_head(struct rq_list * rl,struct request * rq) rq_list_add_head() argument
264 struct request *rq = rl->head; rq_list_pop() local
545 struct request *rq; global() member
804 blk_mq_rq_state(struct request * rq) blk_mq_rq_state() argument
809 blk_mq_request_started(struct request * rq) blk_mq_request_started() argument
814 blk_mq_request_completed(struct request * rq) blk_mq_request_completed() argument
826 blk_mq_set_request_complete(struct request * rq) blk_mq_set_request_complete() argument
835 blk_mq_complete_request_direct(struct request * rq,void (* complete)(struct request * rq)) blk_mq_complete_request_direct() argument
836 blk_mq_complete_request_direct(struct request * rq,void (* complete)(struct request * rq)) blk_mq_complete_request_direct() argument
851 blk_mq_need_time_stamp(struct request * rq) blk_mq_need_time_stamp() argument
856 blk_mq_is_reserved_rq(struct request * rq) blk_mq_is_reserved_rq() argument
993 blk_mq_rq_to_pdu(struct request * rq) blk_mq_rq_to_pdu() argument
1005 blk_mq_cleanup_rq(struct request * rq) blk_mq_cleanup_rq() argument
1014 rq_is_sync(struct request * rq) rq_is_sync() argument
1054 __rq_for_each_bio(_bio,rq) global() argument
1078 blk_rq_pos(const struct request * rq) blk_rq_pos() argument
1083 blk_rq_bytes(const struct request * rq) blk_rq_bytes() argument
1088 blk_rq_cur_bytes(const struct request * rq) blk_rq_cur_bytes() argument
1097 blk_rq_sectors(const struct request * rq) blk_rq_sectors() argument
1102 blk_rq_cur_sectors(const struct request * rq) blk_rq_cur_sectors() argument
1107 blk_rq_stats_sectors(const struct request * rq) blk_rq_stats_sectors() argument
1118 blk_rq_payload_bytes(struct request * rq) blk_rq_payload_bytes() argument
1129 req_bvec(struct request * rq) req_bvec() argument
1136 blk_rq_count_bios(struct request * rq) blk_rq_count_bios() argument
1168 blk_rq_nr_phys_segments(struct request * rq) blk_rq_nr_phys_segments() argument
1179 blk_rq_nr_discard_segments(struct request * rq) blk_rq_nr_discard_segments() argument
1186 blk_rq_map_sg(struct request * rq,struct scatterlist * sglist) blk_rq_map_sg() argument
[all...]
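The inline helpers above are the standard way a driver reads a request's geometry. A small example combining them with the per-request PDU that blk_mq_rq_to_pdu() exposes; struct demo_cmd and demo_fill_cmd are assumed names for a driver-private command:

#include <linux/blk-mq.h>

struct demo_cmd {                       /* hypothetical driver PDU     */
        sector_t sector;
        unsigned int nr_bytes;
        bool is_write;
};

static void demo_fill_cmd(struct request *rq)
{
        /* PDU space reserved after the request at tag-set allocation */
        struct demo_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->sector   = blk_rq_pos(rq);   /* start sector (512B units) */
        cmd->nr_bytes = blk_rq_bytes(rq); /* total transfer length     */
        cmd->is_write = rq_data_dir(rq) == WRITE;
}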
