/* linux/block/blk.h (revision 9934c8c04561413609d2bc38c6b9f268cba774a4) */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32
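
/*
 * These two knobs are consumed by the request allocator in blk-core.c.
 * A simplified sketch of that logic (paraphrased from ioc_batching(), not
 * a verbatim copy): a task that has just slept on a full queue may submit
 * up to BLK_BATCH_REQ requests within BLK_BATCH_TIME jiffies before it is
 * made to wait again.
 *
 *	static int ioc_batching(struct request_queue *q, struct io_context *ioc)
 *	{
 *		if (!ioc)
 *			return 0;
 *		return ioc->nr_batch_requests == q->nr_batching ||
 *			(ioc->nr_batch_requests > 0 &&
 *			 time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 *	}
 */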
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request; make
 * sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
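
/*
 * How the EH side wins or loses that race, paraphrased from the timeout
 * scan in blk-timeout.c (a sketch, not a verbatim copy): the timer only
 * runs error handling for a request if it marks it complete first; if the
 * normal completion path already did, the request is skipped.
 *
 *	if (time_after_eq(jiffies, rq->deadline)) {
 *		list_del_init(&rq->timeout_list);
 *		if (blk_mark_rq_complete(rq))
 *			continue;	(completion beat us to it)
 *		blk_rq_timed_out(rq);
 *	}
 */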

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
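
/*
 * Note that __elv_next_request() only peeks: the request stays on the
 * queue until blk_dequeue_request() is called.  A simplified sketch of the
 * caller side (paraphrased from blk_peek_request() in blk-core.c, not a
 * verbatim copy):
 *
 *	rq = __elv_next_request(q);
 *	if (rq && !(rq->cmd_flags & REQ_STARTED)) {
 *		if (blk_sorted_rq(rq))
 *			elv_activate_rq(q, rq);
 *		rq->cmd_flags |= REQ_STARTED;
 *	}
 */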

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif
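
/*
 * With CONFIG_FAIL_IO_TIMEOUT set, completions can be dropped on purpose
 * to exercise the timeout path.  Roughly how blk-softirq.c consumes it (a
 * sketch, not a verbatim copy): the completion is silently ignored, so the
 * request appears to hang until the EH timer fires.
 *
 *	void blk_complete_request(struct request *req)
 *	{
 *		if (unlikely(blk_should_fake_timeout(req->q)))
 *			return;
 *		if (!blk_mark_rq_complete(req))
 *			__blk_complete_request(req);
 *	}
 */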

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
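
/*
 * Both values are derived from q->nr_requests in
 * blk_queue_congestion_threshold() (blk-core.c).  A simplified sketch of
 * that calculation (not a verbatim copy): the "on" mark sits above the
 * "off" mark, so the queue does not flap between the congested and
 * uncongested states on every completed request.
 *
 *	q->nr_congestion_on  = min(q->nr_requests - q->nr_requests / 8 + 1,
 *				   q->nr_requests);
 *	q->nr_congestion_off = max(q->nr_requests - q->nr_requests / 16 - 1, 1);
 */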

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */
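
/*
 * Hypothetical usage (rq is caller-provided; nothing below is declared in
 * this header): walk every integrity segment attached to a request and
 * total up its length.
 *
 *	struct bio_vec *bvl;
 *	struct req_iterator iter;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_integrity_segment(bvl, rq, iter)
 *		bytes += bvl->bv_len;
 */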

static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}
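
/*
 * blk_cpu_to_group() maps a CPU to the first CPU of its cache-sharing
 * group.  The softirq completion code uses it roughly like this
 * (simplified from __blk_complete_request() in blk-softirq.c, not a
 * verbatim copy): a completion may run locally if the submitting and
 * completing CPUs share a group, otherwise it is punted back.
 *
 *	ccpu = req->cpu;
 *	cpu = smp_processor_id();
 *	if (ccpu == cpu || ccpu == blk_cpu_to_group(cpu))
 *		raise the completion softirq locally;
 *	else
 *		trigger the remote softirq on ccpu;
 */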

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && blk_rq_io_stat(rq) &&
		(blk_fs_request(rq) || blk_discard_rq(rq));
}
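
/*
 * The accounting helpers in blk-core.c gate on this, roughly (a sketch of
 * blk_account_io_done(), not a verbatim copy):
 *
 *	static void blk_account_io_done(struct request *req)
 *	{
 *		if (blk_do_io_stat(req)) {
 *			update the part_stat counters, in_flight, etc.
 *		}
 *	}
 */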

#endif