Lines matching full:request (include/linux/blk-mq.h)

23 typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
26 * request flags */
31 /* request for flush sequence */
39 /* use an I/O scheduler for this request */
47 /* runtime pm request */
56 /* The per-zone write lock is held for this request */
78 struct request {
100 struct request *rq_next;
105 /* Time that the first bio started allocating this request. */
108 /* Time that this request was allocated for this IO. */
147 * request reaches the dispatch list. The ipi_list is only used
148 * to queue the request for softirq completion, which is long
149 * after the request has been unhashed (and even removed from
191 static inline enum req_op req_op(const struct request *req) in req_op()
196 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough()
201 static inline unsigned short req_get_ioprio(struct request *req) in req_get_ioprio()
224 struct request *__req = NULL; \
234 struct request *__req = NULL; \
248 #define rq_list_empty(list) ((list) == (struct request *) NULL)
251 * rq_list_move() - move a struct request from one list to another
254 * @rq: The request to move
255 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
257 static inline void rq_list_move(struct request **src, struct request **dst, in rq_list_move()
258 struct request *rq, struct request *prev) in rq_list_move()
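
A minimal usage sketch (not part of the header): walk a singly linked rq_list via the rq_next field shown above and use rq_list_move() to pull requests that fail a hypothetical can_handle() predicate onto a separate requeue list.

static void my_split_rqlist(struct request **rqlist, struct request **requeue)
{
        struct request *rq = *rqlist, *prev = NULL;

        while (rq) {
                /* remember the successor before rq is potentially unlinked */
                struct request *next = rq->rq_next;

                if (!can_handle(rq))
                        /* unlink rq from *rqlist and push it onto *requeue */
                        rq_list_move(rqlist, requeue, rq, prev);
                else
                        prev = rq;      /* rq stays: it becomes the new prev */
                rq = next;
        }
}
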
271 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
272 * request to complete.
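
A hedged sketch of a driver ->timeout callback built on these return codes: BLK_EH_RESET_TIMER re-arms the timer, BLK_EH_DONE tells the block layer the driver has taken over. struct my_cmd, its error field and my_recovery_in_progress() are hypothetical driver-side names.

static enum blk_eh_timer_return my_timeout_rq(struct request *rq)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        /* still waiting on (hypothetical) recovery work: keep the timer running */
        if (my_recovery_in_progress(cmd))
                return BLK_EH_RESET_TIMER;

        /* give up: complete the request as timed out ourselves */
        cmd->error = BLK_STS_TIMEOUT;
        blk_mq_complete_request(rq);
        return BLK_EH_DONE;
}
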
326 * @sched_data: Pointer owned by the IO scheduler attached to a request
331 * @queue: Pointer to the request queue that owns this hardware context.
345 * pending request in that software queue.
384 * assigned when a request is dispatched from a hardware queue.
389 * scheduler associated with a request queue, a tag is assigned when
390 * that request is allocated. Else, this member is not used.
401 * shared across request queues.
405 /** @cpuhp_online: List to store requests while a CPU is going offline */
407 /** @cpuhp_dead: List to store requests after a CPU has died. */
461 * struct blk_mq_tag_set - tag set that can be shared between request queues
475 * @cmd_size: Number of additional bytes to allocate per request. The block
478 * @timeout: Request processing timeout in jiffies.
488 * @tag_list: List of the request queues that use this tag set. See also
490 * @srcu: Use as lock when type of the request queue is blocking
516 * struct blk_mq_queue_data - Data about a request inserted in a queue
518 * @rq: Request pointer.
519 * @last: If it is the last request in the queue.
522 struct request *rq;
526 typedef bool (busy_tag_iter_fn)(struct request *, void *);
534 * @queue_rq: Queue a new request from block IO.
543 * purpose of kicking the hardware (which the last request otherwise
550 * that each request belongs to the same queue. If the driver doesn't
554 void (*queue_rqs)(struct request **rqlist);
557 * @get_budget: Reserve budget before queue request, once .queue_rq is
572 void (*set_rq_budget_token)(struct request *, int);
576 int (*get_rq_budget_token)(struct request *);
579 * @timeout: Called on request timeout.
581 enum blk_eh_timer_return (*timeout)(struct request *);
589 * @complete: Mark the request as complete.
591 void (*complete)(struct request *);
609 * flush request.
611 int (*init_request)(struct blk_mq_tag_set *set, struct request *,
616 void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
620 * @cleanup_rq: Called before freeing one request which isn't completed
623 void (*cleanup_rq)(struct request *);
639 * information about a request.
641 void (*show_rq)(struct seq_file *m, struct request *rq);
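
A hedged sketch of the central ->queue_rq hook and a minimal ops table, assuming hypothetical my_hw_submit()/my_hw_kick() backend helpers; bd->rq and bd->last come from struct blk_mq_queue_data shown above.

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);

        /* hand the request to (hypothetical) hardware submission code */
        if (!my_hw_submit(hctx->driver_data, rq))
                return BLK_STS_RESOURCE;        /* core retries it later */

        /* no further requests in this batch: ring the doorbell now */
        if (bd->last)
                my_hw_kick(hctx->driver_data);

        return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
        .queue_rq       = my_queue_rq,
        .timeout        = my_timeout_rq,        /* see the sketch above */
        .complete       = my_complete_rq,       /* see the sketch further below */
};
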
706 void blk_mq_free_request(struct request *rq);
707 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
721 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
723 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
738 struct request **rqs;
739 struct request **static_rqs;
743 * used to clear request reference in rqs[] before freeing one
744 * request pool
749 static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, in blk_mq_tag_to_rq()
765 u32 blk_mq_unique_tag(struct request *rq);
778 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
779 * @rq: target request.
781 static inline enum mq_rq_state blk_mq_rq_state(struct request *rq) in blk_mq_rq_state()
786 static inline int blk_mq_request_started(struct request *rq) in blk_mq_request_started()
791 static inline int blk_mq_request_completed(struct request *rq) in blk_mq_request_completed()
798 * Set the state to complete when completing a request from inside ->queue_rq.
800 * need access to the request are called on failure, e.g. by nvme for
803 static inline void blk_mq_set_request_complete(struct request *rq) in blk_mq_set_request_complete()
809 * Complete the request directly instead of deferring it to softirq or
812 static inline void blk_mq_complete_request_direct(struct request *rq, in blk_mq_complete_request_direct()
813 void (*complete)(struct request *rq)) in blk_mq_complete_request_direct()
819 void blk_mq_start_request(struct request *rq);
820 void blk_mq_end_request(struct request *rq, blk_status_t error);
821 void __blk_mq_end_request(struct request *rq, blk_status_t error);
828 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp()
839 static inline bool blk_mq_is_reserved_rq(struct request *rq) in blk_mq_is_reserved_rq()
848 static inline bool blk_mq_add_to_batch(struct request *req, in blk_mq_add_to_batch()
853 * blk_mq_end_request_batch() can't end request allocated from in blk_mq_add_to_batch()
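
A hedged sketch of batched completion modelled on how NVMe uses this helper, typically from the driver's ->poll path where the block layer passes in an io_comp_batch: try to defer the request into the batch and fall back to an individual completion when that is refused. my_error() and my_complete_one() are hypothetical.

static void my_complete_batch(struct io_comp_batch *iob)
{
        /* driver-specific teardown would go here, then end all batched rqs */
        blk_mq_end_request_batch(iob);
}

static void my_complete_from_poll(struct request *rq, struct io_comp_batch *iob)
{
        /* queue rq onto iob if possible; the batch is ended later in one go */
        if (!blk_mq_add_to_batch(rq, iob, my_error(rq), my_complete_batch))
                my_complete_one(rq);    /* per-request fallback path */
}
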
869 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
872 void blk_mq_complete_request(struct request *rq);
873 bool blk_mq_complete_request_remote(struct request *rq);
904 unsigned int blk_mq_rq_cpu(struct request *rq);
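
A hedged sketch of the split completion model: the hardware completion path only marks the request complete (blk_mq_complete_request() may bounce the work to another CPU or softirq), and the ->complete callback ends it later. struct my_cmd and its rq/status fields are hypothetical.

/* called from the driver's (hypothetical) hardware completion handler */
static void my_handle_completion(struct my_cmd *cmd, blk_status_t status)
{
        cmd->status = status;
        blk_mq_complete_request(cmd->rq);
}

/* runs later as the blk_mq_ops ->complete hook */
static void my_complete_rq(struct request *rq)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        blk_mq_end_request(rq, cmd->status);
}
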
916 * blk_mq_rq_from_pdu - cast a PDU to a request
919 * Return: request
921 * Driver command data is immediately after the request. So subtract request
922 * size to get back to the original request.
924 static inline struct request *blk_mq_rq_from_pdu(void *pdu) in blk_mq_rq_from_pdu()
926 return pdu - sizeof(struct request); in blk_mq_rq_from_pdu()
930 * blk_mq_rq_to_pdu - cast a request to a PDU
931 * @rq: the request to be cast
935 * Driver command data is immediately after the request. So add request to get
938 static inline void *blk_mq_rq_to_pdu(struct request *rq) in blk_mq_rq_to_pdu()
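
A hedged sketch of the PDU convention: the driver asks for cmd_size extra bytes per request when setting up its tag set, then converts between the request and its private command with the two helpers above. struct my_cmd and its fields are hypothetical.

struct my_cmd {
        blk_status_t    status;
        u32             hw_tag;
};

/* during driver init (sketch): set->cmd_size = sizeof(struct my_cmd); */

static void my_fill_cmd(struct request *rq)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);      /* rq -> PDU */

        cmd->hw_tag = rq->tag;
}

static void my_finish_cmd(struct my_cmd *cmd)
{
        struct request *rq = blk_mq_rq_from_pdu(cmd);   /* PDU -> rq */

        blk_mq_end_request(rq, cmd->status);
}
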
950 static inline void blk_mq_cleanup_rq(struct request *rq) in blk_mq_cleanup_rq()
956 static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, in blk_rq_bio_prep()
968 static inline bool rq_is_sync(struct request *rq) in rq_is_sync()
973 void blk_rq_init(struct request_queue *q, struct request *rq);
974 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
977 void blk_rq_unprep_clone(struct request *rq);
978 blk_status_t blk_insert_cloned_request(struct request *rq);
989 int blk_rq_map_user(struct request_queue *, struct request *,
991 int blk_rq_map_user_io(struct request *, struct rq_map_data *,
993 int blk_rq_map_user_iov(struct request_queue *, struct request *,
996 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
998 int blk_rq_append_bio(struct request *rq, struct bio *bio);
999 void blk_execute_rq_nowait(struct request *rq, bool at_head);
1000 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
1001 bool blk_rq_is_poll(struct request *rq);
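
A hedged sketch of the synchronous passthrough pattern built from the functions above: allocate a request, fill its driver PDU (a data buffer could be attached with the blk_rq_map_* helpers), execute it, and free it. REQ_OP_DRV_IN marks a driver-private command; my_setup_cmd() is hypothetical.

static blk_status_t my_send_internal_cmd(struct request_queue *q)
{
        struct request *rq;
        blk_status_t ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return errno_to_blk_status(PTR_ERR(rq));

        my_setup_cmd(blk_mq_rq_to_pdu(rq));     /* fill the driver PDU */

        ret = blk_execute_rq(rq, false);        /* false: queue at the tail */
        blk_mq_free_request(rq);
        return ret;
}
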
1026 * blk_rq_bytes() : bytes left in the entire request
1028 * blk_rq_sectors() : sectors left in the entire request
1030 * blk_rq_stats_sectors() : sectors of the entire request used for stats
1032 static inline sector_t blk_rq_pos(const struct request *rq) in blk_rq_pos()
1037 static inline unsigned int blk_rq_bytes(const struct request *rq) in blk_rq_bytes()
1042 static inline int blk_rq_cur_bytes(const struct request *rq) in blk_rq_cur_bytes()
1051 static inline unsigned int blk_rq_sectors(const struct request *rq) in blk_rq_sectors()
1056 static inline unsigned int blk_rq_cur_sectors(const struct request *rq) in blk_rq_cur_sectors()
1061 static inline unsigned int blk_rq_stats_sectors(const struct request *rq) in blk_rq_stats_sectors()
1068 * is different from the size of the request. Any driver that supports such
1072 static inline unsigned int blk_rq_payload_bytes(struct request *rq) in blk_rq_payload_bytes()
1080 * Return the first full biovec in the request. The caller needs to check that
1083 static inline struct bio_vec req_bvec(struct request *rq) in req_bvec()
1090 static inline unsigned int blk_rq_count_bios(struct request *rq) in blk_rq_count_bios()
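
A hedged sketch of what the accessors above are typically used for in a submission path: deriving the device LBA and transfer length of a request. struct my_hw_cmd is hypothetical and assumes 512-byte logical blocks.

static void my_setup_rw(struct my_hw_cmd *cmd, struct request *rq)
{
        cmd->lba        = blk_rq_pos(rq);       /* start, in 512-byte sectors */
        cmd->nr_sects   = blk_rq_sectors(rq);   /* total length, in sectors */
        cmd->nr_bytes   = blk_rq_bytes(rq);     /* same length, in bytes */
        cmd->is_write   = (req_op(rq) == REQ_OP_WRITE);
}
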
1101 void blk_steal_bios(struct bio_list *list, struct request *rq);
1104 * Request completion related functions.
1107 * the request without completing it.
1109 bool blk_update_request(struct request *rq, blk_status_t error,
1111 void blk_abort_request(struct request *);
1122 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) in blk_rq_nr_phys_segments()
1131 * Each discard bio merged into a request is counted as one segment.
1133 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) in blk_rq_nr_discard_segments()
1138 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1140 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg()
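
A hedged sketch of mapping a request's data into a scatterlist with the helper above (the pattern used by drivers such as virtio_blk): size the table by blk_rq_nr_phys_segments() and let blk_rq_map_sg() fill it.

static int my_map_data(struct request *rq, struct sg_table *sgt)
{
        if (sg_alloc_table(sgt, blk_rq_nr_phys_segments(rq), GFP_ATOMIC))
                return -ENOMEM;

        /* returns the number of scatterlist entries actually used */
        return blk_rq_map_sg(rq->q, rq, sgt->sgl);
}
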
1147 void blk_dump_rq_flags(struct request *, char *);
1150 static inline unsigned int blk_rq_zone_no(struct request *rq) in blk_rq_zone_no()
1155 static inline unsigned int blk_rq_zone_is_seq(struct request *rq) in blk_rq_zone_is_seq()
1162 * @rq: Request to examine.
1166 static inline bool blk_rq_is_seq_zoned_write(struct request *rq) in blk_rq_is_seq_zoned_write()
1172 bool blk_req_needs_zone_write_lock(struct request *rq);
1173 bool blk_req_zone_write_trylock(struct request *rq);
1174 void __blk_req_zone_write_lock(struct request *rq);
1175 void __blk_req_zone_write_unlock(struct request *rq);
1177 static inline void blk_req_zone_write_lock(struct request *rq) in blk_req_zone_write_lock()
1183 static inline void blk_req_zone_write_unlock(struct request *rq) in blk_req_zone_write_unlock()
1189 static inline bool blk_req_zone_is_write_locked(struct request *rq) in blk_req_zone_is_write_locked()
1195 static inline bool blk_req_can_dispatch_to_zone(struct request *rq) in blk_req_can_dispatch_to_zone()
1202 static inline bool blk_rq_is_seq_zoned_write(struct request *rq) in blk_rq_is_seq_zoned_write()
1207 static inline bool blk_req_needs_zone_write_lock(struct request *rq) in blk_req_needs_zone_write_lock()
1212 static inline void blk_req_zone_write_lock(struct request *rq) in blk_req_zone_write_lock()
1216 static inline void blk_req_zone_write_unlock(struct request *rq) in blk_req_zone_write_unlock()
1219 static inline bool blk_req_zone_is_write_locked(struct request *rq) in blk_req_zone_is_write_locked()
1224 static inline bool blk_req_can_dispatch_to_zone(struct request *rq) in blk_req_can_dispatch_to_zone()
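
A hedged sketch of how a zoned-aware driver might use the CONFIG_BLK_DEV_ZONED helpers above to account for writes; the per-zone write-pointer cache (dev->wp[]) and struct my_dev are hypothetical.

static void my_account_zoned_write(struct my_dev *dev, struct request *rq)
{
        /* only writes to sequential zones advance a zone's write pointer */
        if (!blk_rq_is_seq_zoned_write(rq))
                return;

        dev->wp[blk_rq_zone_no(rq)] += blk_rq_sectors(rq);
}
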