Searched refs:rq_flags (Results 1 – 25 of 66) sorted by relevance

/linux/block/
blk-mq-sched.h
50 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_allow_merge()
61 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_completed_request()
71 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_requeue_request()
blk-mq.c
96 if (rq->rq_flags & RQF_IO_STAT && in blk_mq_check_in_driver()
422 data->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
423 rq->rq_flags = data->rq_flags; in blk_mq_rq_ctx_init()
425 if (data->rq_flags & RQF_SCHED_TAGS) { in blk_mq_rq_ctx_init()
448 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_rq_ctx_init()
485 if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests_batch()
517 data->rq_flags |= RQF_SCHED_TAGS; in __blk_mq_alloc_requests()
529 data->rq_flags |= RQF_USE_SCHED; in __blk_mq_alloc_requests()
538 data->rq_flags |= RQF_RESV; in __blk_mq_alloc_requests()
571 if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests()
[all …]
blk-merge.c
647 if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
660 rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
679 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_mixed_merge()
692 if (req->rq_flags & RQF_IO_STAT) { in blk_account_io_merge_request()
773 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
885 if (req->rq_flags & RQF_IO_STAT) { in blk_account_io_merge_bio()
908 if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) in bio_attempt_back_merge()
931 if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) in bio_attempt_front_merge()
blk-flush.c
120 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
319 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
372 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_rq_init_flush()
blk-pm.h
21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
blk-timeout.c
140 req->rq_flags &= ~RQF_TIMED_OUT; in blk_add_timer()
blk-mq.h
166 req_flags_t rq_flags; member
234 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
blk.h
178 if (rq->rq_flags & RQF_NOMERGE_FLAGS) in rq_mergeable()
332 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
506 if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING) in blk_zone_finish_request()
/linux/include/linux/
blk-mq.h
109 req_flags_t rq_flags; member
857 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); in blk_mq_need_time_stamp()
862 return rq->rq_flags & RQF_RESV; in blk_mq_is_reserved_rq()
890 if (req->rq_flags & RQF_SCHED_TAGS) in blk_mq_add_to_batch()
1124 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
1135 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
1174 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
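The block/ and include/linux/blk-mq.h hits above all use rq_flags the same way: a plain req_flags_t bitmask embedded in struct request, tested and updated with ordinary bitwise operators against RQF_* constants (RQF_USE_SCHED, RQF_DONTPREP, RQF_QUIET, ...). A minimal standalone sketch of that pattern follows; the typedef, bit values, and helpers are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the block-layer convention: rq_flags as a plain bitmask on
 * the request, manipulated with bitwise AND/OR while the owner holds
 * whatever exclusion the request life cycle already provides.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int req_flags_t;        /* stand-in for the kernel typedef */

#define RQF_USE_SCHED   (1u << 0)        /* illustrative bit positions only */
#define RQF_DONTPREP    (1u << 1)
#define RQF_QUIET       (1u << 2)

struct request {
	req_flags_t rq_flags;
};

static bool rq_uses_sched(const struct request *rq)
{
	return rq->rq_flags & RQF_USE_SCHED;     /* test a flag */
}

static void rq_requeue(struct request *rq)
{
	rq->rq_flags &= ~RQF_DONTPREP;           /* clear: must be re-prepared */
	rq->rq_flags |= RQF_QUIET;               /* set: suppress error logging */
}

int main(void)
{
	struct request rq = { .rq_flags = RQF_USE_SCHED | RQF_DONTPREP };

	rq_requeue(&rq);
	printf("uses sched: %d, flags: %#x\n", rq_uses_sched(&rq), rq.rq_flags);
	return 0;
}
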
/linux/kernel/sched/
sched.h
1738 struct rq_flags { struct
1793 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
1802 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1811 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1822 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1826 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1830 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock()
1838 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock()
1850 struct rq *rq; struct rq_flags rf)
1852 static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave()
[all …]
core.c
705 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
729 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
874 struct rq_flags rf; in hrtick()
900 struct rq_flags rf; in __hrtick_start()
1836 struct rq_flags rf; in uclamp_update_active()
2262 struct rq_flags rf; in wait_task_inactive()
2464 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
2510 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
2534 struct rq_flags rf; in migration_cpu_stop()
2900 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop()
core_sched.c
59 struct rq_flags rf; in sched_core_update_cookie()
deadline.c
1135 static void __push_dl_task(struct rq *rq, struct rq_flags *rf) in __push_dl_task()
1161 struct rq_flags *rf = &scope.rf; in dl_server_timer()
1222 struct rq_flags rf; in dl_task_timer()
1728 struct rq_flags rf; in inactive_task_timer()
2220 struct rq_flags rf; in migrate_task_rq_dl()
2271 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_dl()
2912 struct rq_flags rf; in dl_add_task_root_domain()
idle.c
442 balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_idle()
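The kernel/sched/ hits above refer to something entirely different: struct rq_flags is a small on-stack cookie that the runqueue locking helpers (task_rq_lock(), rq_lock_irqsave(), rq_pin_lock(), ...) fill in when a runqueue lock is taken and consume when it is released, carrying saved IRQ state and lockdep pin information. The standalone mock below only models the call shape with a pthread mutex and a dummy saved value; it is not the kernel implementation.

/*
 * Mock of the scheduler-side convention: the caller declares a struct
 * rq_flags on its stack and passes it to lock/unlock pairs, which use it
 * to stash state that must survive across the critical section.
 */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int nr_running;
};

struct rq_flags {
	unsigned long flags;                  /* stand-in for saved IRQ/pin state */
};

static void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
{
	rf->flags = 0xabcd;                   /* pretend-save of IRQ state */
	pthread_mutex_lock(&rq->lock);
}

static void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
{
	pthread_mutex_unlock(&rq->lock);
	(void)rf->flags;                      /* pretend-restore */
}

int main(void)
{
	struct rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_running = 0 };
	struct rq_flags rf;                   /* always lives on the caller's stack */

	rq_lock_irqsave(&rq, &rf);
	rq.nr_running++;                      /* critical section */
	rq_unlock_irqrestore(&rq, &rf);

	printf("nr_running=%d\n", rq.nr_running);
	return 0;
}
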
/linux/drivers/net/ethernet/fungible/funcore/
fun_queue.h
69 u16 rq_flags; member
120 u16 rq_flags; member
fun_queue.c
459 funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ; in fun_alloc_queue()
491 rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0, in fun_create_rq()
/linux/include/linux/sunrpc/
svc.h
216 unsigned long rq_flags; /* flags field */ member
316 set_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
318 return test_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
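The SUNRPC hits (include/linux/sunrpc/svc.h here, plus net/sunrpc/svc_xprt.c and fs/nfsd/nfscache.c further down) follow yet another convention: rqstp->rq_flags is an unsigned long whose individually named bits (RQ_SECURE, RQ_VICTIM, RQ_DATA, ...) are touched only through atomic bit operations, so concurrent threads can flip different bits without taking a lock. Below is a minimal userspace model of that pattern, using compiler atomic builtins in place of the kernel's set_bit()/test_bit()/test_and_clear_bit(); the bit numbers are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum { RQ_SECURE, RQ_VICTIM, RQ_DATA };          /* stand-in bit numbers */

/* Atomically set bit nr in *addr. */
static void set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

/* Non-destructively test bit nr in *addr. */
static bool test_bit(int nr, const unsigned long *addr)
{
	return __atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr);
}

/* Atomically clear bit nr and report whether it was previously set. */
static bool test_and_clear_bit(int nr, unsigned long *addr)
{
	return __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED) & (1UL << nr);
}

int main(void)
{
	unsigned long rq_flags = 0;

	set_bit(RQ_DATA, &rq_flags);
	if (test_bit(RQ_DATA, &rq_flags))
		printf("RQ_DATA is set\n");
	if (test_and_clear_bit(RQ_DATA, &rq_flags))
		printf("RQ_DATA was set, now cleared\n");
	return 0;
}
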
/linux/drivers/scsi/
scsi_lib.c
118 if (rq->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
119 rq->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
326 req->rq_flags |= RQF_QUIET; in scsi_execute_cmd()
737 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in scsi_rq_err_bytes()
921 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
1012 else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
1259 if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
1261 rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
1340 if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) in scsi_device_state_check()
1348 if (req && !(req->rq_flags & RQF_PM)) in scsi_device_state_check()
[all …]
/linux/drivers/nvme/host/
ioctl.c
102 struct nvme_command *cmd, blk_opf_t rq_flags, in nvme_alloc_user_request() argument
107 req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); in nvme_alloc_user_request()
449 blk_opf_t rq_flags = REQ_ALLOC_CACHE; in nvme_uring_cmd_io() local
500 rq_flags |= REQ_NOWAIT; in nvme_uring_cmd_io()
504 rq_flags |= REQ_POLLED; in nvme_uring_cmd_io()
506 req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags); in nvme_uring_cmd_io()
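In the drivers/nvme/host/ioctl.c hits above, rq_flags is only a local blk_opf_t that accumulates REQ_* allocation hints (REQ_ALLOC_CACHE, REQ_NOWAIT, REQ_POLLED) before being OR-ed with the opcode passed to blk_mq_alloc_request(); it is unrelated to struct request::rq_flags. A small standalone sketch of that accumulate-and-combine pattern, with stand-in types and values:

#include <stdio.h>

typedef unsigned int blk_opf_t;               /* stand-in typedef */

#define REQ_OP_DRV_OUT   0x23u                /* illustrative values only */
#define REQ_ALLOC_CACHE  (1u << 8)
#define REQ_NOWAIT       (1u << 9)
#define REQ_POLLED       (1u << 10)

static blk_opf_t build_opf(int nonblocking, int polled)
{
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;     /* default allocation hint */

	if (nonblocking)
		rq_flags |= REQ_NOWAIT;           /* fail rather than sleep */
	if (polled)
		rq_flags |= REQ_POLLED;           /* completion will be polled */

	return REQ_OP_DRV_OUT | rq_flags;         /* opcode | flags, as handed on */
}

int main(void)
{
	printf("opf=%#x\n", build_opf(1, 1));
	return 0;
}
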
/linux/drivers/mmc/core/
queue.c
243 req->rq_flags |= RQF_QUIET; in mmc_mq_queue_rq()
291 if (!(req->rq_flags & RQF_DONTPREP)) { in mmc_mq_queue_rq()
293 req->rq_flags |= RQF_DONTPREP; in mmc_mq_queue_rq()
/linux/net/sunrpc/
svc_xprt.c
419 if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
423 set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
431 if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
1189 if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
1220 set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
/linux/drivers/md/
dm-rq.c
266 if (rq->rq_flags & RQF_FAILED) in dm_softirq_done()
293 rq->rq_flags |= RQF_FAILED; in dm_kill_unmapped_request()
/linux/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
191 req->rq_flags |= RQF_QUIET; in hp_sw_prep_fn()
/linux/fs/nfsd/
nfscache.c
523 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
619 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
