#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

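/*
 * Flush machinery state, one instance per request_queue (or per
 * hardware context on blk-mq).  Pending flush requests are double
 * buffered on flush_queue[], indexed by flush_pending_idx and
 * flush_running_idx, and flush_rq is the preallocated request used
 * to issue the actual flushes.  See blk-flush.c.
 */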
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with orig_rq; the two can't be
	 * active at the same time.
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

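/*
 * Return the flush queue serving @ctx: the hardware context's queue
 * on blk-mq, or the request_queue-wide one on the legacy path.
 */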
static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	struct blk_mq_hw_ctx *hctx;

	if (!q->mq_ops)
		return q->fq;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	return hctx->fq;
}

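/*
 * Take an extra reference on @q's kobject.  Unlike blk_get_queue(),
 * no check is made for a dying queue, so the caller must already
 * hold a reference.
 */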
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
#else
static inline void blk_flush_integrity(void)
{
}
#endif

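/* Request timeout handling, implemented in blk-timeout.c. */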
void blk_rq_timed_out_timer(unsigned long data);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

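/*
 * bio merging and plug list helpers used on the submission path,
 * shared by the legacy and blk-mq code.
 */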
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

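/* I/O accounting helpers, shared by the legacy and blk-mq paths. */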
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
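/* True if @rq is currently on the elevator's merge hash. */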
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);

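/*
 * Legacy (request_fn) dispatch helper: return the request at the head
 * of the queue, asking the elevator to dispatch more as needed.
 * Returns NULL if the queue is empty, in bypass mode, or being held
 * while a flush is in flight on a device that can't queue flushes.
 */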
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight and the drive can't queue
		 * flush requests, so hold the queue until the flush
		 * finishes.  Even if we didn't do this, the driver couldn't
		 * dispatch further requests and would have to requeue them.
		 * Holding back can also improve throughput: with requests
		 * flush1, write1, flush2, once flush1 is dispatched the
		 * queue is held and write1 isn't issued.  When flush1
		 * finishes, flush2 is dispatched and, since the disk cache
		 * is already clean, completes almost immediately -- flush2
		 * is effectively folded into flush1.
		 * Because the queue is held, a flag is set so that it is
		 * restarted later.  See flush_end_io() for details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

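/*
 * Elevator notification hooks: a request is activated when it is
 * handed to the driver and deactivated when it is requeued.
 */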
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

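/* Fault injection support for faking request timeouts. */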
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

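/* Low-level request/bio merge primitives, implemented in blk-merge.c. */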
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */