1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "cfq.h"
19 
20 /*
21  * tunables
22  */
23 /* max requests dispatched in one round of service */
24 static const int cfq_quantum = 8;
25 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const int cfq_slice_sync = HZ / 10;
31 static int cfq_slice_async = HZ / 25;
32 static const int cfq_slice_async_rq = 2;
33 static int cfq_slice_idle = HZ / 125;
34 static int cfq_group_idle = HZ / 125;
35 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
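
/*
 * All time based tunables above are in jiffies.  For illustration, assuming
 * HZ=1000: the two cfq_fifo_expire values are 250ms and 125ms,
 * cfq_slice_sync is 100ms, cfq_slice_async is 40ms, cfq_slice_idle and
 * cfq_group_idle are both 8ms, and cfq_target_latency is 300ms.
 */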
37 
38 /*
39  * offset from end of service tree
40  */
41 #define CFQ_IDLE_DELAY		(HZ / 5)
42 
43 /*
44  * below this threshold, we consider thinktime immediate
45  */
46 #define CFQ_MIN_TT		(2)
47 
48 #define CFQ_SLICE_SCALE		(5)
49 #define CFQ_HW_QUEUE_MIN	(5)
50 #define CFQ_SERVICE_SHIFT       12
51 
52 #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
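
/*
 * seek_history is a 32-bit sliding window with one bit per recent request;
 * a set bit marks a request that seeked relative to the previous one.
 * CFQQ_SEEKY() counts the set bits (hweight32) and flags the queue as seeky
 * when more than 32/8 = 4 of the last 32 requests were seeks.
 */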
56 
57 #define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
60 
61 static struct kmem_cache *cfq_pool;
62 
63 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66 
67 #define sample_valid(samples)	((samples) > 80)
68 #define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
69 
70 struct cfq_ttime {
71 	unsigned long last_end_request;
72 
73 	unsigned long ttime_total;
74 	unsigned long ttime_samples;
75 	unsigned long ttime_mean;
76 };
77 
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85 	struct rb_root rb;
86 	struct rb_node *left;
87 	unsigned count;
88 	unsigned total_weight;
89 	u64 min_vdisktime;
90 	struct cfq_ttime ttime;
91 };
92 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
93 			.ttime = {.last_end_request = jiffies,},}
94 
95 /*
96  * Per process-grouping structure
97  */
98 struct cfq_queue {
99 	/* reference count */
100 	int ref;
101 	/* various state flags, see below */
102 	unsigned int flags;
103 	/* parent cfq_data */
104 	struct cfq_data *cfqd;
105 	/* service_tree member */
106 	struct rb_node rb_node;
107 	/* service_tree key */
108 	unsigned long rb_key;
109 	/* prio tree member */
110 	struct rb_node p_node;
111 	/* prio tree root we belong to, if any */
112 	struct rb_root *p_root;
113 	/* sorted list of pending requests */
114 	struct rb_root sort_list;
115 	/* if fifo isn't expired, next request to serve */
116 	struct request *next_rq;
117 	/* requests queued in sort_list */
118 	int queued[2];
119 	/* currently allocated requests */
120 	int allocated[2];
121 	/* fifo list of requests in sort_list */
122 	struct list_head fifo;
123 
124 	/* time when queue got scheduled in to dispatch first request. */
125 	unsigned long dispatch_start;
126 	unsigned int allocated_slice;
127 	unsigned int slice_dispatch;
128 	/* time when first request from queue completed and slice started. */
129 	unsigned long slice_start;
130 	unsigned long slice_end;
131 	long slice_resid;
132 
133 	/* pending priority requests */
134 	int prio_pending;
135 	/* number of requests that are on the dispatch list or inside driver */
136 	int dispatched;
137 
138 	/* io prio of this group */
139 	unsigned short ioprio, org_ioprio;
140 	unsigned short ioprio_class;
141 
142 	pid_t pid;
143 
144 	u32 seek_history;
145 	sector_t last_request_pos;
146 
147 	struct cfq_rb_root *service_tree;
148 	struct cfq_queue *new_cfqq;
149 	struct cfq_group *cfqg;
150 	/* Number of sectors dispatched from queue in single dispatch round */
151 	unsigned long nr_sectors;
152 };
153 
154 /*
155  * First index in the service_trees.
156  * IDLE is handled separately and uses its own service_tree_idle instead.
157  */
158 enum wl_prio_t {
159 	BE_WORKLOAD = 0,
160 	RT_WORKLOAD = 1,
161 	IDLE_WORKLOAD = 2,
162 	CFQ_PRIO_NR,
163 };
164 
165 /*
166  * Second index in the service_trees.
167  */
168 enum wl_type_t {
169 	ASYNC_WORKLOAD = 0,
170 	SYNC_NOIDLE_WORKLOAD = 1,
171 	SYNC_WORKLOAD = 2
172 };
173 
174 /* This is per cgroup per device grouping structure */
175 struct cfq_group {
176 	/* group service_tree member */
177 	struct rb_node rb_node;
178 
179 	/* group service_tree key */
180 	u64 vdisktime;
181 	unsigned int weight;
182 	unsigned int new_weight;
183 	bool needs_update;
184 
185 	/* number of cfqq currently on this group */
186 	int nr_cfqq;
187 
188 	/*
189 	 * Per group busy queues average. Useful for workload slice calc. We
190 	 * create the array for each prio class but at run time it is used
191 	 * only for RT and BE class and slot for IDLE class remains unused.
192 	 * This is primarily done to avoid confusion and a gcc warning.
193 	 */
194 	unsigned int busy_queues_avg[CFQ_PRIO_NR];
195 	/*
196 	 * rr lists of queues with requests. We maintain service trees for
197 	 * RT and BE classes. These trees are subdivided in subclasses
198 	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
199 	 * class there is no subclassification and all the cfq queues go on
200 	 * a single tree service_tree_idle.
201 	 * Counts are embedded in the cfq_rb_root
202 	 */
203 	struct cfq_rb_root service_trees[2][3];
204 	struct cfq_rb_root service_tree_idle;
205 
206 	unsigned long saved_workload_slice;
207 	enum wl_type_t saved_workload;
208 	enum wl_prio_t saved_serving_prio;
209 	struct blkio_group blkg;
210 #ifdef CONFIG_CFQ_GROUP_IOSCHED
211 	struct hlist_node cfqd_node;
212 	int ref;
213 #endif
214 	/* number of requests that are on the dispatch list or inside driver */
215 	int dispatched;
216 	struct cfq_ttime ttime;
217 };
218 
219 struct cfq_io_cq {
220 	struct io_cq		icq;		/* must be the first member */
221 	struct cfq_queue	*cfqq[2];
222 	struct cfq_ttime	ttime;
223 };
224 
225 /*
226  * Per block device queue structure
227  */
228 struct cfq_data {
229 	struct request_queue *queue;
230 	/* Root service tree for cfq_groups */
231 	struct cfq_rb_root grp_service_tree;
232 	struct cfq_group root_group;
233 
234 	/*
235 	 * The priority currently being served
236 	 */
237 	enum wl_prio_t serving_prio;
238 	enum wl_type_t serving_type;
239 	unsigned long workload_expires;
240 	struct cfq_group *serving_group;
241 
242 	/*
243 	 * Each priority tree is sorted by next_request position.  These
244 	 * trees are used when determining if two or more queues are
245 	 * interleaving requests (see cfq_close_cooperator).
246 	 */
247 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
248 
249 	unsigned int busy_queues;
250 	unsigned int busy_sync_queues;
251 
252 	int rq_in_driver;
253 	int rq_in_flight[2];
254 
255 	/*
256 	 * queue-depth detection
257 	 */
258 	int rq_queued;
259 	int hw_tag;
260 	/*
261 	 * hw_tag can be
262 	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
263 	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
264 	 *  0 => no NCQ
265 	 */
266 	int hw_tag_est_depth;
267 	unsigned int hw_tag_samples;
268 
269 	/*
270 	 * idle window management
271 	 */
272 	struct timer_list idle_slice_timer;
273 	struct work_struct unplug_work;
274 
275 	struct cfq_queue *active_queue;
276 	struct cfq_io_cq *active_cic;
277 
278 	/*
279 	 * async queue for each priority case
280 	 */
281 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
282 	struct cfq_queue *async_idle_cfqq;
283 
284 	sector_t last_position;
285 
286 	/*
287 	 * tunables, see top of file
288 	 */
289 	unsigned int cfq_quantum;
290 	unsigned int cfq_fifo_expire[2];
291 	unsigned int cfq_back_penalty;
292 	unsigned int cfq_back_max;
293 	unsigned int cfq_slice[2];
294 	unsigned int cfq_slice_async_rq;
295 	unsigned int cfq_slice_idle;
296 	unsigned int cfq_group_idle;
297 	unsigned int cfq_latency;
298 
299 	/*
300 	 * Fallback dummy cfqq for extreme OOM conditions
301 	 */
302 	struct cfq_queue oom_cfqq;
303 
304 	unsigned long last_delayed_sync;
305 
306 	/* List of cfq groups being managed on this device*/
307 	struct hlist_head cfqg_list;
308 
309 	/* Number of groups which are on blkcg->blkg_list */
310 	unsigned int nr_blkcg_linked_grps;
311 };
312 
313 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
314 
315 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
316 					    enum wl_prio_t prio,
317 					    enum wl_type_t type)
318 {
319 	if (!cfqg)
320 		return NULL;
321 
322 	if (prio == IDLE_WORKLOAD)
323 		return &cfqg->service_tree_idle;
324 
325 	return &cfqg->service_trees[prio][type];
326 }
327 
328 enum cfqq_state_flags {
329 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
330 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
331 	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
332 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
333 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
334 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
335 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
336 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
337 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
338 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
339 	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
340 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
341 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
342 };
343 
344 #define CFQ_CFQQ_FNS(name)						\
345 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
346 {									\
347 	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
348 }									\
349 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
350 {									\
351 	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
352 }									\
353 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
354 {									\
355 	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
356 }
357 
358 CFQ_CFQQ_FNS(on_rr);
359 CFQ_CFQQ_FNS(wait_request);
360 CFQ_CFQQ_FNS(must_dispatch);
361 CFQ_CFQQ_FNS(must_alloc_slice);
362 CFQ_CFQQ_FNS(fifo_expire);
363 CFQ_CFQQ_FNS(idle_window);
364 CFQ_CFQQ_FNS(prio_changed);
365 CFQ_CFQQ_FNS(slice_new);
366 CFQ_CFQQ_FNS(sync);
367 CFQ_CFQQ_FNS(coop);
368 CFQ_CFQQ_FNS(split_coop);
369 CFQ_CFQQ_FNS(deep);
370 CFQ_CFQQ_FNS(wait_busy);
371 #undef CFQ_CFQQ_FNS
372 
373 #ifdef CONFIG_CFQ_GROUP_IOSCHED
374 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
375 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
376 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
377 			blkg_path(&(cfqq)->cfqg->blkg), ##args)
378 
379 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
380 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
381 				blkg_path(&(cfqg)->blkg), ##args)       \
382 
383 #else
384 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
385 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
386 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
387 #endif
388 #define cfq_log(cfqd, fmt, args...)	\
389 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
390 
391 /* Traverses through cfq group service trees */
392 #define for_each_cfqg_st(cfqg, i, j, st) \
393 	for (i = 0; i <= IDLE_WORKLOAD; i++) \
394 		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
395 			: &cfqg->service_tree_idle; \
396 			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
397 			(i == IDLE_WORKLOAD && j == 0); \
398 			j++, st = i < IDLE_WORKLOAD ? \
399 			&cfqg->service_trees[i][j]: NULL) \
400 
401 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
402 	struct cfq_ttime *ttime, bool group_idle)
403 {
404 	unsigned long slice;
405 	if (!sample_valid(ttime->ttime_samples))
406 		return false;
407 	if (group_idle)
408 		slice = cfqd->cfq_group_idle;
409 	else
410 		slice = cfqd->cfq_slice_idle;
411 	return ttime->ttime_mean > slice;
412 }
413 
414 static inline bool iops_mode(struct cfq_data *cfqd)
415 {
416 	/*
417 	 * If we are not idling on queues and it is an NCQ drive, requests
418 	 * execute in parallel and measuring time is not meaningful in most
419 	 * cases, unless we drive shallower queue depths, which then becomes
420 	 * a performance bottleneck. In such cases switch to providing
421 	 * fairness in terms of number of IOs.
422 	 */
423 	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
424 		return true;
425 	else
426 		return false;
427 }
428 
429 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
430 {
431 	if (cfq_class_idle(cfqq))
432 		return IDLE_WORKLOAD;
433 	if (cfq_class_rt(cfqq))
434 		return RT_WORKLOAD;
435 	return BE_WORKLOAD;
436 }
437 
438 
439 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
440 {
441 	if (!cfq_cfqq_sync(cfqq))
442 		return ASYNC_WORKLOAD;
443 	if (!cfq_cfqq_idle_window(cfqq))
444 		return SYNC_NOIDLE_WORKLOAD;
445 	return SYNC_WORKLOAD;
446 }
447 
448 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
449 					struct cfq_data *cfqd,
450 					struct cfq_group *cfqg)
451 {
452 	if (wl == IDLE_WORKLOAD)
453 		return cfqg->service_tree_idle.count;
454 
455 	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
456 		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
457 		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
458 }
459 
460 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
461 					struct cfq_group *cfqg)
462 {
463 	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
464 		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
465 }
466 
467 static void cfq_dispatch_insert(struct request_queue *, struct request *);
468 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
469 				       struct io_context *, gfp_t);
470 
471 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
472 {
473 	/* cic->icq is the first member, %NULL will convert to %NULL */
474 	return container_of(icq, struct cfq_io_cq, icq);
475 }
476 
477 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
478 					       struct io_context *ioc)
479 {
480 	if (ioc)
481 		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
482 	return NULL;
483 }
484 
485 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
486 {
487 	return cic->cfqq[is_sync];
488 }
489 
490 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
491 				bool is_sync)
492 {
493 	cic->cfqq[is_sync] = cfqq;
494 }
495 
496 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
497 {
498 	return cic->icq.q->elevator->elevator_data;
499 }
500 
501 /*
502  * We regard a request as SYNC if it is either a read or has the SYNC bit
503  * set (in which case it could also be a direct WRITE).
504  */
505 static inline bool cfq_bio_sync(struct bio *bio)
506 {
507 	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
508 }
509 
510 /*
511  * scheduler run of queue, if there are requests pending and no one in the
512  * driver that will restart queueing
513  */
514 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
515 {
516 	if (cfqd->busy_queues) {
517 		cfq_log(cfqd, "schedule dispatch");
518 		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
519 	}
520 }
521 
522 /*
523  * Scale schedule slice based on io priority. Use the sync time slice only
524  * if a queue is marked sync and has sync io queued. A sync queue with async
525  * io only should not get the full sync slice length.
526  */
527 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
528 				 unsigned short prio)
529 {
530 	const int base_slice = cfqd->cfq_slice[sync];
531 
532 	WARN_ON(prio >= IOPRIO_BE_NR);
533 
534 	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
535 }
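
/*
 * For illustration, assuming HZ=1000 and the default sync base slice of
 * 100ms: each ioprio level shifts the slice by base_slice/CFQ_SLICE_SCALE =
 * 20ms around the prio 4 midpoint, so prio 0 gets 180ms, prio 4 gets 100ms
 * and prio 7 gets 40ms.
 */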
536 
537 static inline int
538 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
539 {
540 	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
541 }
542 
543 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
544 {
545 	u64 d = delta << CFQ_SERVICE_SHIFT;
546 
547 	d = d * BLKIO_WEIGHT_DEFAULT;
548 	do_div(d, cfqg->weight);
549 	return d;
550 }
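
/*
 * The charge above is scaled by BLKIO_WEIGHT_DEFAULT/weight before being
 * added to the group's vdisktime, so a group with twice the default weight
 * accrues vdisktime at half the rate for the same amount of service.
 * CFQ_SERVICE_SHIFT keeps fixed-point precision across the division.
 */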
551 
552 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
553 {
554 	s64 delta = (s64)(vdisktime - min_vdisktime);
555 	if (delta > 0)
556 		min_vdisktime = vdisktime;
557 
558 	return min_vdisktime;
559 }
560 
561 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
562 {
563 	s64 delta = (s64)(vdisktime - min_vdisktime);
564 	if (delta < 0)
565 		min_vdisktime = vdisktime;
566 
567 	return min_vdisktime;
568 }
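
/*
 * max_vdisktime()/min_vdisktime() compare via a signed delta so the result
 * stays correct even after the unsigned 64-bit vdisktime values wrap
 * around, in the same way CFS handles vruntime.
 */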
569 
570 static void update_min_vdisktime(struct cfq_rb_root *st)
571 {
572 	struct cfq_group *cfqg;
573 
574 	if (st->left) {
575 		cfqg = rb_entry_cfqg(st->left);
576 		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
577 						  cfqg->vdisktime);
578 	}
579 }
580 
581 /*
582  * Get the averaged number of queues of RT/BE priority.
583  * The average is updated with a formula that gives more weight to higher
584  * numbers, so it quickly follows sudden increases and decays slowly.
585  */
586 
587 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
588 					struct cfq_group *cfqg, bool rt)
589 {
590 	unsigned min_q, max_q;
591 	unsigned mult  = cfq_hist_divisor - 1;
592 	unsigned round = cfq_hist_divisor / 2;
593 	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
594 
595 	min_q = min(cfqg->busy_queues_avg[rt], busy);
596 	max_q = max(cfqg->busy_queues_avg[rt], busy);
597 	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
598 		cfq_hist_divisor;
599 	return cfqg->busy_queues_avg[rt];
600 }
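
/*
 * With the default cfq_hist_divisor of 4 the update above is
 * avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4, a weighted average
 * biased towards the larger value, so the estimate rises quickly when
 * queues appear and decays slowly when they go away.
 */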
601 
602 static inline unsigned
603 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
604 {
605 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
606 
607 	return cfq_target_latency * cfqg->weight / st->total_weight;
608 }
609 
610 static inline unsigned
611 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
612 {
613 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
614 	if (cfqd->cfq_latency) {
615 		/*
616 		 * interested queues (we consider only the ones with the same
617 		 * priority class in the cfq group)
618 		 */
619 		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
620 						cfq_class_rt(cfqq));
621 		unsigned sync_slice = cfqd->cfq_slice[1];
622 		unsigned expect_latency = sync_slice * iq;
623 		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
624 
625 		if (expect_latency > group_slice) {
626 			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
627 			/* scale low_slice according to IO priority
628 			 * and sync vs async */
629 			unsigned low_slice =
630 				min(slice, base_low_slice * slice / sync_slice);
631 			/* the adapted slice value is scaled to fit all iqs
632 			 * into the target latency */
633 			slice = max(slice * group_slice / expect_latency,
634 				    low_slice);
635 		}
636 	}
637 	return slice;
638 }
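
/*
 * For illustration, assume a 100ms sync slice and 5 interested queues in
 * the group: expect_latency is 500ms.  If the group's share of the target
 * latency is only 300ms, each slice is scaled by 300/500, bounded below by
 * low_slice, so all queues still fit roughly within the group slice.
 */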
639 
640 static inline void
641 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
642 {
643 	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
644 
645 	cfqq->slice_start = jiffies;
646 	cfqq->slice_end = jiffies + slice;
647 	cfqq->allocated_slice = slice;
648 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
649 }
650 
651 /*
652  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
653  * isn't valid until the first request from the dispatch is activated
654  * and the slice time set.
655  */
656 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
657 {
658 	if (cfq_cfqq_slice_new(cfqq))
659 		return false;
660 	if (time_before(jiffies, cfqq->slice_end))
661 		return false;
662 
663 	return true;
664 }
665 
666 /*
667  * Lifted from AS - choose which of rq1 and rq2 is best served now.
668  * We choose the request that is closest to the head right now. Distance
669  * behind the head is penalized and only allowed to a certain extent.
670  */
671 static struct request *
672 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
673 {
674 	sector_t s1, s2, d1 = 0, d2 = 0;
675 	unsigned long back_max;
676 #define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
677 #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
678 	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
679 
680 	if (rq1 == NULL || rq1 == rq2)
681 		return rq2;
682 	if (rq2 == NULL)
683 		return rq1;
684 
685 	if (rq_is_sync(rq1) != rq_is_sync(rq2))
686 		return rq_is_sync(rq1) ? rq1 : rq2;
687 
688 	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
689 		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
690 
691 	s1 = blk_rq_pos(rq1);
692 	s2 = blk_rq_pos(rq2);
693 
694 	/*
695 	 * by definition, 1KiB is 2 sectors
696 	 */
697 	back_max = cfqd->cfq_back_max * 2;
698 
699 	/*
700 	 * Strict one way elevator _except_ in the case where we allow
701 	 * short backward seeks which are biased as twice the cost of a
702 	 * similar forward seek.
703 	 */
704 	if (s1 >= last)
705 		d1 = s1 - last;
706 	else if (s1 + back_max >= last)
707 		d1 = (last - s1) * cfqd->cfq_back_penalty;
708 	else
709 		wrap |= CFQ_RQ1_WRAP;
710 
711 	if (s2 >= last)
712 		d2 = s2 - last;
713 	else if (s2 + back_max >= last)
714 		d2 = (last - s2) * cfqd->cfq_back_penalty;
715 	else
716 		wrap |= CFQ_RQ2_WRAP;
717 
718 	/* Found required data */
719 
720 	/*
721 	 * By doing switch() on the bit mask "wrap" we avoid having to
722 	 * check two variables for all permutations: --> faster!
723 	 */
724 	switch (wrap) {
725 	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
726 		if (d1 < d2)
727 			return rq1;
728 		else if (d2 < d1)
729 			return rq2;
730 		else {
731 			if (s1 >= s2)
732 				return rq1;
733 			else
734 				return rq2;
735 		}
736 
737 	case CFQ_RQ2_WRAP:
738 		return rq1;
739 	case CFQ_RQ1_WRAP:
740 		return rq2;
741 	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
742 	default:
743 		/*
744 		 * Since both rqs are wrapped,
745 		 * start with the one that's further behind head
746 		 * (--> only *one* back seek required),
747 		 * since back seek takes more time than forward.
748 		 */
749 		if (s1 <= s2)
750 			return rq1;
751 		else
752 			return rq2;
753 	}
754 }
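
/*
 * For illustration, with the default cfq_back_penalty of 2 a request 100
 * sectors behind the head is costed like one 200 sectors ahead of it, and
 * a request further behind than cfq_back_max is treated as "wrapped" and
 * only chosen when nothing better is available.
 */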
755 
756 /*
757  * Below is the leftmost-cache rbtree addon.
758  */
759 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
760 {
761 	/* Service tree is empty */
762 	if (!root->count)
763 		return NULL;
764 
765 	if (!root->left)
766 		root->left = rb_first(&root->rb);
767 
768 	if (root->left)
769 		return rb_entry(root->left, struct cfq_queue, rb_node);
770 
771 	return NULL;
772 }
773 
774 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
775 {
776 	if (!root->left)
777 		root->left = rb_first(&root->rb);
778 
779 	if (root->left)
780 		return rb_entry_cfqg(root->left);
781 
782 	return NULL;
783 }
784 
785 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
786 {
787 	rb_erase(n, root);
788 	RB_CLEAR_NODE(n);
789 }
790 
791 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
792 {
793 	if (root->left == n)
794 		root->left = NULL;
795 	rb_erase_init(n, &root->rb);
796 	--root->count;
797 }
798 
799 /*
800  * would be nice to take fifo expire time into account as well
801  */
802 static struct request *
803 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
804 		  struct request *last)
805 {
806 	struct rb_node *rbnext = rb_next(&last->rb_node);
807 	struct rb_node *rbprev = rb_prev(&last->rb_node);
808 	struct request *next = NULL, *prev = NULL;
809 
810 	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
811 
812 	if (rbprev)
813 		prev = rb_entry_rq(rbprev);
814 
815 	if (rbnext)
816 		next = rb_entry_rq(rbnext);
817 	else {
818 		rbnext = rb_first(&cfqq->sort_list);
819 		if (rbnext && rbnext != &last->rb_node)
820 			next = rb_entry_rq(rbnext);
821 	}
822 
823 	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
824 }
825 
826 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
827 				      struct cfq_queue *cfqq)
828 {
829 	/*
830 	 * just an approximation, should be ok.
831 	 */
832 	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
833 		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
834 }
835 
836 static inline s64
837 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
838 {
839 	return cfqg->vdisktime - st->min_vdisktime;
840 }
841 
842 static void
843 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
844 {
845 	struct rb_node **node = &st->rb.rb_node;
846 	struct rb_node *parent = NULL;
847 	struct cfq_group *__cfqg;
848 	s64 key = cfqg_key(st, cfqg);
849 	int left = 1;
850 
851 	while (*node != NULL) {
852 		parent = *node;
853 		__cfqg = rb_entry_cfqg(parent);
854 
855 		if (key < cfqg_key(st, __cfqg))
856 			node = &parent->rb_left;
857 		else {
858 			node = &parent->rb_right;
859 			left = 0;
860 		}
861 	}
862 
863 	if (left)
864 		st->left = &cfqg->rb_node;
865 
866 	rb_link_node(&cfqg->rb_node, parent, node);
867 	rb_insert_color(&cfqg->rb_node, &st->rb);
868 }
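
/*
 * The 'left' flag above tracks whether insertion only ever descended to the
 * left; if so, the new node is the leftmost entry and the cached st->left
 * pointer is updated so cfq_rb_first_group() stays O(1).
 */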
869 
870 static void
871 cfq_update_group_weight(struct cfq_group *cfqg)
872 {
873 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
874 	if (cfqg->needs_update) {
875 		cfqg->weight = cfqg->new_weight;
876 		cfqg->needs_update = false;
877 	}
878 }
879 
880 static void
881 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
882 {
883 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
884 
885 	cfq_update_group_weight(cfqg);
886 	__cfq_group_service_tree_add(st, cfqg);
887 	st->total_weight += cfqg->weight;
888 }
889 
890 static void
891 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
892 {
893 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
894 	struct cfq_group *__cfqg;
895 	struct rb_node *n;
896 
897 	cfqg->nr_cfqq++;
898 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
899 		return;
900 
901 	/*
902 	 * Currently put the group at the end. Later, implement something
903 	 * so that groups get a lesser vtime based on their weights, so that
904 	 * a group does not lose everything if it was not continuously backlogged.
905 	 */
906 	n = rb_last(&st->rb);
907 	if (n) {
908 		__cfqg = rb_entry_cfqg(n);
909 		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
910 	} else
911 		cfqg->vdisktime = st->min_vdisktime;
912 	cfq_group_service_tree_add(st, cfqg);
913 }
914 
915 static void
916 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
917 {
918 	st->total_weight -= cfqg->weight;
919 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
920 		cfq_rb_erase(&cfqg->rb_node, st);
921 }
922 
923 static void
924 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
925 {
926 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
927 
928 	BUG_ON(cfqg->nr_cfqq < 1);
929 	cfqg->nr_cfqq--;
930 
931 	/* If there are other cfq queues under this group, don't delete it */
932 	if (cfqg->nr_cfqq)
933 		return;
934 
935 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
936 	cfq_group_service_tree_del(st, cfqg);
937 	cfqg->saved_workload_slice = 0;
938 	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
939 }
940 
941 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
942 						unsigned int *unaccounted_time)
943 {
944 	unsigned int slice_used;
945 
946 	/*
947 	 * Queue got expired before even a single request completed or
948 	 * got expired immediately after first request completion.
949 	 */
950 	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
951 		/*
952 		 * Also charge the seek time incurred to the group, otherwise
953 		 * if there are multiple queues in the group, each can dispatch
954 		 * a single request on seeky media and cause lots of seek time
955 		 * and the group will never know it.
956 		 */
957 		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
958 					1);
959 	} else {
960 		slice_used = jiffies - cfqq->slice_start;
961 		if (slice_used > cfqq->allocated_slice) {
962 			*unaccounted_time = slice_used - cfqq->allocated_slice;
963 			slice_used = cfqq->allocated_slice;
964 		}
965 		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
966 			*unaccounted_time += cfqq->slice_start -
967 					cfqq->dispatch_start;
968 	}
969 
970 	return slice_used;
971 }
972 
973 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
974 				struct cfq_queue *cfqq)
975 {
976 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
977 	unsigned int used_sl, charge, unaccounted_sl = 0;
978 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
979 			- cfqg->service_tree_idle.count;
980 
981 	BUG_ON(nr_sync < 0);
982 	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
983 
984 	if (iops_mode(cfqd))
985 		charge = cfqq->slice_dispatch;
986 	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
987 		charge = cfqq->allocated_slice;
988 
989 	/* Can't update vdisktime while group is on service tree */
990 	cfq_group_service_tree_del(st, cfqg);
991 	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
992 	/* If a new weight was requested, update now, off tree */
993 	cfq_group_service_tree_add(st, cfqg);
994 
995 	/* This group is being expired. Save the context */
996 	if (time_after(cfqd->workload_expires, jiffies)) {
997 		cfqg->saved_workload_slice = cfqd->workload_expires
998 						- jiffies;
999 		cfqg->saved_workload = cfqd->serving_type;
1000 		cfqg->saved_serving_prio = cfqd->serving_prio;
1001 	} else
1002 		cfqg->saved_workload_slice = 0;
1003 
1004 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1005 					st->min_vdisktime);
1006 	cfq_log_cfqq(cfqq->cfqd, cfqq,
1007 		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1008 		     used_sl, cfqq->slice_dispatch, charge,
1009 		     iops_mode(cfqd), cfqq->nr_sectors);
1010 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
1011 					  unaccounted_sl);
1012 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1013 }
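
/*
 * Summary of the charging above: in iops_mode() the group is charged the
 * number of requests dispatched, an async queue in a group with no sync
 * queues is charged its full allocated slice, and otherwise the wall-clock
 * slice actually used is charged.  The weight-scaled charge advances
 * vdisktime while the group is temporarily off the service tree.
 */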
1014 
1015 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1016 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
1017 {
1018 	if (blkg)
1019 		return container_of(blkg, struct cfq_group, blkg);
1020 	return NULL;
1021 }
1022 
1023 static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1024 					  unsigned int weight)
1025 {
1026 	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1027 	cfqg->new_weight = weight;
1028 	cfqg->needs_update = true;
1029 }
1030 
1031 static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1032 			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
1033 {
1034 	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1035 	unsigned int major, minor;
1036 
1037 	/*
1038 	 * Add group onto cgroup list. It might happen that bdi->dev is
1039 	 * not initialized yet. Initialize this new group without major
1040 	 * and minor info and this info will be filled in once a new thread
1041 	 * comes for IO.
1042 	 */
1043 	if (bdi->dev) {
1044 		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1045 		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1046 					(void *)cfqd, MKDEV(major, minor));
1047 	} else
1048 		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1049 					(void *)cfqd, 0);
1050 
1051 	cfqd->nr_blkcg_linked_grps++;
1052 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1053 
1054 	/* Add group on cfqd list */
1055 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1056 }
1057 
1058 /*
1059  * Should be called from sleepable context. The request queue lock must not
1060  * be held, as per-cpu stats are allocated dynamically and alloc_percpu
1061  * needs to be called from sleepable context.
1062  */
1063 static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1064 {
1065 	struct cfq_group *cfqg = NULL;
1066 	int i, j, ret;
1067 	struct cfq_rb_root *st;
1068 
1069 	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1070 	if (!cfqg)
1071 		return NULL;
1072 
1073 	for_each_cfqg_st(cfqg, i, j, st)
1074 		*st = CFQ_RB_ROOT;
1075 	RB_CLEAR_NODE(&cfqg->rb_node);
1076 
1077 	cfqg->ttime.last_end_request = jiffies;
1078 
1079 	/*
1080 	 * Take the initial reference that will be released on destroy
1081 	 * This can be thought of a joint reference by cgroup and
1082 	 * elevator which will be dropped by either elevator exit
1083 	 * or cgroup deletion path depending on who is exiting first.
1084 	 */
1085 	cfqg->ref = 1;
1086 
1087 	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
1088 	if (ret) {
1089 		kfree(cfqg);
1090 		return NULL;
1091 	}
1092 
1093 	return cfqg;
1094 }
1095 
1096 static struct cfq_group *
1097 cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1098 {
1099 	struct cfq_group *cfqg = NULL;
1100 	void *key = cfqd;
1101 	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1102 	unsigned int major, minor;
1103 
1104 	/*
1105 	 * This is the common case when there are no blkio cgroups.
1106 	 * Avoid lookup in this case
1107 	 */
1108 	if (blkcg == &blkio_root_cgroup)
1109 		cfqg = &cfqd->root_group;
1110 	else
1111 		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1112 
1113 	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1114 		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1115 		cfqg->blkg.dev = MKDEV(major, minor);
1116 	}
1117 
1118 	return cfqg;
1119 }
1120 
1121 /*
1122  * Search for the cfq group current task belongs to. request_queue lock must
1123  * be held.
1124  */
1125 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1126 {
1127 	struct blkio_cgroup *blkcg;
1128 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
1129 	struct request_queue *q = cfqd->queue;
1130 
1131 	rcu_read_lock();
1132 	blkcg = task_blkio_cgroup(current);
1133 	cfqg = cfq_find_cfqg(cfqd, blkcg);
1134 	if (cfqg) {
1135 		rcu_read_unlock();
1136 		return cfqg;
1137 	}
1138 
1139 	/*
1140 	 * Need to allocate a group. Allocation of group also needs allocation
1141 	 * of per cpu stats which in turn takes a mutex() and can block. Hence
1142 	 * we need to drop the rcu lock and queue_lock before we call alloc.
1143 	 *
1144 	 * Not taking any queue reference here and assuming that queue is
1145 	 * around by the time we return. CFQ queue allocation code does
1146 	 * the same. It might be racy though.
1147 	 */
1148 
1149 	rcu_read_unlock();
1150 	spin_unlock_irq(q->queue_lock);
1151 
1152 	cfqg = cfq_alloc_cfqg(cfqd);
1153 
1154 	spin_lock_irq(q->queue_lock);
1155 
1156 	rcu_read_lock();
1157 	blkcg = task_blkio_cgroup(current);
1158 
1159 	/*
1160 	 * If some other thread already allocated the group while we were
1161 	 * not holding queue lock, free up the group
1162 	 */
1163 	__cfqg = cfq_find_cfqg(cfqd, blkcg);
1164 
1165 	if (__cfqg) {
1166 		kfree(cfqg);
1167 		rcu_read_unlock();
1168 		return __cfqg;
1169 	}
1170 
1171 	if (!cfqg)
1172 		cfqg = &cfqd->root_group;
1173 
1174 	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1175 	rcu_read_unlock();
1176 	return cfqg;
1177 }
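
/*
 * Note the allocate-then-recheck pattern above: the rcu read lock and
 * queue_lock are dropped around the (possibly blocking) allocation, so
 * after re-acquiring them the group must be looked up again and our copy
 * freed if another thread beat us to it.  If allocation failed we fall
 * back to the root group.
 */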
1178 
1179 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1180 {
1181 	cfqg->ref++;
1182 	return cfqg;
1183 }
1184 
1185 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1186 {
1187 	/* Currently, all async queues are mapped to root group */
1188 	if (!cfq_cfqq_sync(cfqq))
1189 		cfqg = &cfqq->cfqd->root_group;
1190 
1191 	cfqq->cfqg = cfqg;
1192 	/* cfqq reference on cfqg */
1193 	cfqq->cfqg->ref++;
1194 }
1195 
1196 static void cfq_put_cfqg(struct cfq_group *cfqg)
1197 {
1198 	struct cfq_rb_root *st;
1199 	int i, j;
1200 
1201 	BUG_ON(cfqg->ref <= 0);
1202 	cfqg->ref--;
1203 	if (cfqg->ref)
1204 		return;
1205 	for_each_cfqg_st(cfqg, i, j, st)
1206 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1207 	free_percpu(cfqg->blkg.stats_cpu);
1208 	kfree(cfqg);
1209 }
1210 
1211 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1212 {
1213 	/* Something wrong if we are trying to remove same group twice */
1214 	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1215 
1216 	hlist_del_init(&cfqg->cfqd_node);
1217 
1218 	BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1219 	cfqd->nr_blkcg_linked_grps--;
1220 
1221 	/*
1222 	 * Put the reference taken at the time of creation so that when all
1223 	 * queues are gone, group can be destroyed.
1224 	 */
1225 	cfq_put_cfqg(cfqg);
1226 }
1227 
1228 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1229 {
1230 	struct hlist_node *pos, *n;
1231 	struct cfq_group *cfqg;
1232 
1233 	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1234 		/*
1235 		 * If cgroup removal path got to blk_group first and removed
1236 		 * it from cgroup list, then it will take care of destroying
1237 		 * cfqg also.
1238 		 */
1239 		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1240 			cfq_destroy_cfqg(cfqd, cfqg);
1241 	}
1242 }
1243 
1244 /*
1245  * Blk cgroup controller notification saying that blkio_group object is being
1246  * delinked as associated cgroup object is going away. That also means that
1247  * no new IO will come in this group. So get rid of this group as soon as
1248  * any pending IO in the group is finished.
1249  *
1250  * This function is called under rcu_read_lock(). key is the rcu protected
1251  * pointer. That means "key" is a valid cfq_data pointer as long as we hold
1252  * the rcu read lock.
1253  *
1254  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1255  * it should not be NULL as even if the elevator was exiting, the cgroup
1256  * deletion path got to it first.
1257  */
1258 static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1259 {
1260 	unsigned long  flags;
1261 	struct cfq_data *cfqd = key;
1262 
1263 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1264 	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1265 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1266 }
1267 
1268 #else /* GROUP_IOSCHED */
1269 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1270 {
1271 	return &cfqd->root_group;
1272 }
1273 
1274 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1275 {
1276 	return cfqg;
1277 }
1278 
1279 static inline void
1280 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1281 	cfqq->cfqg = cfqg;
1282 }
1283 
1284 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1285 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1286 
1287 #endif /* GROUP_IOSCHED */
1288 
1289 /*
1290  * The cfqd->service_trees hold all pending cfq_queues that have
1291  * requests waiting to be processed. It is sorted in the order that
1292  * we will service the queues.
1293  */
1294 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1295 				 bool add_front)
1296 {
1297 	struct rb_node **p, *parent;
1298 	struct cfq_queue *__cfqq;
1299 	unsigned long rb_key;
1300 	struct cfq_rb_root *service_tree;
1301 	int left;
1302 	int new_cfqq = 1;
1303 
1304 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1305 						cfqq_type(cfqq));
1306 	if (cfq_class_idle(cfqq)) {
1307 		rb_key = CFQ_IDLE_DELAY;
1308 		parent = rb_last(&service_tree->rb);
1309 		if (parent && parent != &cfqq->rb_node) {
1310 			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1311 			rb_key += __cfqq->rb_key;
1312 		} else
1313 			rb_key += jiffies;
1314 	} else if (!add_front) {
1315 		/*
1316 		 * Get our rb key offset. Subtract any residual slice
1317 		 * value carried from last service. A negative resid
1318 		 * count indicates slice overrun, and this should position
1319 		 * the next service time further away in the tree.
1320 		 */
1321 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1322 		rb_key -= cfqq->slice_resid;
1323 		cfqq->slice_resid = 0;
1324 	} else {
1325 		rb_key = -HZ;
1326 		__cfqq = cfq_rb_first(service_tree);
1327 		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1328 	}
1329 
1330 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1331 		new_cfqq = 0;
1332 		/*
1333 		 * same position, nothing more to do
1334 		 */
1335 		if (rb_key == cfqq->rb_key &&
1336 		    cfqq->service_tree == service_tree)
1337 			return;
1338 
1339 		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1340 		cfqq->service_tree = NULL;
1341 	}
1342 
1343 	left = 1;
1344 	parent = NULL;
1345 	cfqq->service_tree = service_tree;
1346 	p = &service_tree->rb.rb_node;
1347 	while (*p) {
1348 		struct rb_node **n;
1349 
1350 		parent = *p;
1351 		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1352 
1353 		/*
1354 		 * sort by key, that represents service time.
1355 		 */
1356 		if (time_before(rb_key, __cfqq->rb_key))
1357 			n = &(*p)->rb_left;
1358 		else {
1359 			n = &(*p)->rb_right;
1360 			left = 0;
1361 		}
1362 
1363 		p = n;
1364 	}
1365 
1366 	if (left)
1367 		service_tree->left = &cfqq->rb_node;
1368 
1369 	cfqq->rb_key = rb_key;
1370 	rb_link_node(&cfqq->rb_node, parent, p);
1371 	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1372 	service_tree->count++;
1373 	if (add_front || !new_cfqq)
1374 		return;
1375 	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1376 }
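
/*
 * rb_key ordering above: smaller keys are served first.  Idle class queues
 * are keyed CFQ_IDLE_DELAY past the current last entry so they only run
 * when nothing else is pending, while add_front uses the current leftmost
 * key minus HZ to jump ahead of every queued cfqq.
 */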
1377 
1378 static struct cfq_queue *
1379 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1380 		     sector_t sector, struct rb_node **ret_parent,
1381 		     struct rb_node ***rb_link)
1382 {
1383 	struct rb_node **p, *parent;
1384 	struct cfq_queue *cfqq = NULL;
1385 
1386 	parent = NULL;
1387 	p = &root->rb_node;
1388 	while (*p) {
1389 		struct rb_node **n;
1390 
1391 		parent = *p;
1392 		cfqq = rb_entry(parent, struct cfq_queue, p_node);
1393 
1394 		/*
1395 		 * Sort strictly based on sector.  Smallest to the left,
1396 		 * largest to the right.
1397 		 */
1398 		if (sector > blk_rq_pos(cfqq->next_rq))
1399 			n = &(*p)->rb_right;
1400 		else if (sector < blk_rq_pos(cfqq->next_rq))
1401 			n = &(*p)->rb_left;
1402 		else
1403 			break;
1404 		p = n;
1405 		cfqq = NULL;
1406 	}
1407 
1408 	*ret_parent = parent;
1409 	if (rb_link)
1410 		*rb_link = p;
1411 	return cfqq;
1412 }
1413 
1414 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1415 {
1416 	struct rb_node **p, *parent;
1417 	struct cfq_queue *__cfqq;
1418 
1419 	if (cfqq->p_root) {
1420 		rb_erase(&cfqq->p_node, cfqq->p_root);
1421 		cfqq->p_root = NULL;
1422 	}
1423 
1424 	if (cfq_class_idle(cfqq))
1425 		return;
1426 	if (!cfqq->next_rq)
1427 		return;
1428 
1429 	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1430 	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1431 				      blk_rq_pos(cfqq->next_rq), &parent, &p);
1432 	if (!__cfqq) {
1433 		rb_link_node(&cfqq->p_node, parent, p);
1434 		rb_insert_color(&cfqq->p_node, cfqq->p_root);
1435 	} else
1436 		cfqq->p_root = NULL;
1437 }
1438 
1439 /*
1440  * Update cfqq's position in the service tree.
1441  */
1442 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1443 {
1444 	/*
1445 	 * Resorting requires the cfqq to be on the RR list already.
1446 	 */
1447 	if (cfq_cfqq_on_rr(cfqq)) {
1448 		cfq_service_tree_add(cfqd, cfqq, 0);
1449 		cfq_prio_tree_add(cfqd, cfqq);
1450 	}
1451 }
1452 
1453 /*
1454  * add to busy list of queues for service, trying to be fair in ordering
1455  * the pending list according to last request service
1456  */
1457 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1458 {
1459 	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1460 	BUG_ON(cfq_cfqq_on_rr(cfqq));
1461 	cfq_mark_cfqq_on_rr(cfqq);
1462 	cfqd->busy_queues++;
1463 	if (cfq_cfqq_sync(cfqq))
1464 		cfqd->busy_sync_queues++;
1465 
1466 	cfq_resort_rr_list(cfqd, cfqq);
1467 }
1468 
1469 /*
1470  * Called when the cfqq no longer has requests pending, remove it from
1471  * the service tree.
1472  */
1473 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1474 {
1475 	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1476 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
1477 	cfq_clear_cfqq_on_rr(cfqq);
1478 
1479 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1480 		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1481 		cfqq->service_tree = NULL;
1482 	}
1483 	if (cfqq->p_root) {
1484 		rb_erase(&cfqq->p_node, cfqq->p_root);
1485 		cfqq->p_root = NULL;
1486 	}
1487 
1488 	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1489 	BUG_ON(!cfqd->busy_queues);
1490 	cfqd->busy_queues--;
1491 	if (cfq_cfqq_sync(cfqq))
1492 		cfqd->busy_sync_queues--;
1493 }
1494 
1495 /*
1496  * rb tree support functions
1497  */
1498 static void cfq_del_rq_rb(struct request *rq)
1499 {
1500 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1501 	const int sync = rq_is_sync(rq);
1502 
1503 	BUG_ON(!cfqq->queued[sync]);
1504 	cfqq->queued[sync]--;
1505 
1506 	elv_rb_del(&cfqq->sort_list, rq);
1507 
1508 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1509 		/*
1510 		 * Queue will be deleted from service tree when we actually
1511 		 * expire it later. Right now just remove it from prio tree
1512 		 * as it is empty.
1513 		 */
1514 		if (cfqq->p_root) {
1515 			rb_erase(&cfqq->p_node, cfqq->p_root);
1516 			cfqq->p_root = NULL;
1517 		}
1518 	}
1519 }
1520 
1521 static void cfq_add_rq_rb(struct request *rq)
1522 {
1523 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1524 	struct cfq_data *cfqd = cfqq->cfqd;
1525 	struct request *prev;
1526 
1527 	cfqq->queued[rq_is_sync(rq)]++;
1528 
1529 	elv_rb_add(&cfqq->sort_list, rq);
1530 
1531 	if (!cfq_cfqq_on_rr(cfqq))
1532 		cfq_add_cfqq_rr(cfqd, cfqq);
1533 
1534 	/*
1535 	 * check if this request is a better next-serve candidate
1536 	 */
1537 	prev = cfqq->next_rq;
1538 	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1539 
1540 	/*
1541 	 * adjust priority tree position, if ->next_rq changes
1542 	 */
1543 	if (prev != cfqq->next_rq)
1544 		cfq_prio_tree_add(cfqd, cfqq);
1545 
1546 	BUG_ON(!cfqq->next_rq);
1547 }
1548 
1549 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1550 {
1551 	elv_rb_del(&cfqq->sort_list, rq);
1552 	cfqq->queued[rq_is_sync(rq)]--;
1553 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1554 					rq_data_dir(rq), rq_is_sync(rq));
1555 	cfq_add_rq_rb(rq);
1556 	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1557 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1558 			rq_is_sync(rq));
1559 }
1560 
1561 static struct request *
1562 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1563 {
1564 	struct task_struct *tsk = current;
1565 	struct cfq_io_cq *cic;
1566 	struct cfq_queue *cfqq;
1567 
1568 	cic = cfq_cic_lookup(cfqd, tsk->io_context);
1569 	if (!cic)
1570 		return NULL;
1571 
1572 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1573 	if (cfqq) {
1574 		sector_t sector = bio->bi_sector + bio_sectors(bio);
1575 
1576 		return elv_rb_find(&cfqq->sort_list, sector);
1577 	}
1578 
1579 	return NULL;
1580 }
1581 
1582 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1583 {
1584 	struct cfq_data *cfqd = q->elevator->elevator_data;
1585 
1586 	cfqd->rq_in_driver++;
1587 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1588 						cfqd->rq_in_driver);
1589 
1590 	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1591 }
1592 
1593 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1594 {
1595 	struct cfq_data *cfqd = q->elevator->elevator_data;
1596 
1597 	WARN_ON(!cfqd->rq_in_driver);
1598 	cfqd->rq_in_driver--;
1599 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1600 						cfqd->rq_in_driver);
1601 }
1602 
1603 static void cfq_remove_request(struct request *rq)
1604 {
1605 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1606 
1607 	if (cfqq->next_rq == rq)
1608 		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1609 
1610 	list_del_init(&rq->queuelist);
1611 	cfq_del_rq_rb(rq);
1612 
1613 	cfqq->cfqd->rq_queued--;
1614 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1615 					rq_data_dir(rq), rq_is_sync(rq));
1616 	if (rq->cmd_flags & REQ_PRIO) {
1617 		WARN_ON(!cfqq->prio_pending);
1618 		cfqq->prio_pending--;
1619 	}
1620 }
1621 
1622 static int cfq_merge(struct request_queue *q, struct request **req,
1623 		     struct bio *bio)
1624 {
1625 	struct cfq_data *cfqd = q->elevator->elevator_data;
1626 	struct request *__rq;
1627 
1628 	__rq = cfq_find_rq_fmerge(cfqd, bio);
1629 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
1630 		*req = __rq;
1631 		return ELEVATOR_FRONT_MERGE;
1632 	}
1633 
1634 	return ELEVATOR_NO_MERGE;
1635 }
1636 
1637 static void cfq_merged_request(struct request_queue *q, struct request *req,
1638 			       int type)
1639 {
1640 	if (type == ELEVATOR_FRONT_MERGE) {
1641 		struct cfq_queue *cfqq = RQ_CFQQ(req);
1642 
1643 		cfq_reposition_rq_rb(cfqq, req);
1644 	}
1645 }
1646 
1647 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1648 				struct bio *bio)
1649 {
1650 	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1651 					bio_data_dir(bio), cfq_bio_sync(bio));
1652 }
1653 
1654 static void
1655 cfq_merged_requests(struct request_queue *q, struct request *rq,
1656 		    struct request *next)
1657 {
1658 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1659 	struct cfq_data *cfqd = q->elevator->elevator_data;
1660 
1661 	/*
1662 	 * reposition in fifo if next is older than rq
1663 	 */
1664 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1665 	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1666 		list_move(&rq->queuelist, &next->queuelist);
1667 		rq_set_fifo_time(rq, rq_fifo_time(next));
1668 	}
1669 
1670 	if (cfqq->next_rq == next)
1671 		cfqq->next_rq = rq;
1672 	cfq_remove_request(next);
1673 	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1674 					rq_data_dir(next), rq_is_sync(next));
1675 
1676 	cfqq = RQ_CFQQ(next);
1677 	/*
1678 	 * all requests of this queue are merged to other queues, delete it
1679 	 * from the service tree. If it's the active_queue,
1680 	 * cfq_dispatch_requests() will choose to expire it or do idle
1681 	 */
1682 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1683 	    cfqq != cfqd->active_queue)
1684 		cfq_del_cfqq_rr(cfqd, cfqq);
1685 }
1686 
1687 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1688 			   struct bio *bio)
1689 {
1690 	struct cfq_data *cfqd = q->elevator->elevator_data;
1691 	struct cfq_io_cq *cic;
1692 	struct cfq_queue *cfqq;
1693 
1694 	/*
1695 	 * Disallow merge of a sync bio into an async request.
1696 	 */
1697 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1698 		return false;
1699 
1700 	/*
1701 	 * Lookup the cfqq that this bio will be queued with and allow
1702 	 * merge only if rq is queued there.
1703 	 */
1704 	cic = cfq_cic_lookup(cfqd, current->io_context);
1705 	if (!cic)
1706 		return false;
1707 
1708 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1709 	return cfqq == RQ_CFQQ(rq);
1710 }
1711 
1712 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1713 {
1714 	del_timer(&cfqd->idle_slice_timer);
1715 	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1716 }
1717 
1718 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1719 				   struct cfq_queue *cfqq)
1720 {
1721 	if (cfqq) {
1722 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1723 				cfqd->serving_prio, cfqd->serving_type);
1724 		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1725 		cfqq->slice_start = 0;
1726 		cfqq->dispatch_start = jiffies;
1727 		cfqq->allocated_slice = 0;
1728 		cfqq->slice_end = 0;
1729 		cfqq->slice_dispatch = 0;
1730 		cfqq->nr_sectors = 0;
1731 
1732 		cfq_clear_cfqq_wait_request(cfqq);
1733 		cfq_clear_cfqq_must_dispatch(cfqq);
1734 		cfq_clear_cfqq_must_alloc_slice(cfqq);
1735 		cfq_clear_cfqq_fifo_expire(cfqq);
1736 		cfq_mark_cfqq_slice_new(cfqq);
1737 
1738 		cfq_del_timer(cfqd, cfqq);
1739 	}
1740 
1741 	cfqd->active_queue = cfqq;
1742 }
1743 
1744 /*
1745  * current cfqq expired its slice (or was too idle), select new one
1746  */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
1750 {
1751 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1752 
1753 	if (cfq_cfqq_wait_request(cfqq))
1754 		cfq_del_timer(cfqd, cfqq);
1755 
1756 	cfq_clear_cfqq_wait_request(cfqq);
1757 	cfq_clear_cfqq_wait_busy(cfqq);
1758 
1759 	/*
1760 	 * If this cfqq is shared between multiple processes, check to
1761 	 * make sure that those processes are still issuing I/Os within
1762 	 * the mean seek distance.  If not, it may be time to break the
1763 	 * queues apart again.
1764 	 */
1765 	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1766 		cfq_mark_cfqq_split_coop(cfqq);
1767 
1768 	/*
1769 	 * store what was left of this slice, if the queue idled/timed out
1770 	 */
1771 	if (timed_out) {
1772 		if (cfq_cfqq_slice_new(cfqq))
1773 			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1774 		else
1775 			cfqq->slice_resid = cfqq->slice_end - jiffies;
1776 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1777 	}
1778 
1779 	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1780 
1781 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1782 		cfq_del_cfqq_rr(cfqd, cfqq);
1783 
1784 	cfq_resort_rr_list(cfqd, cfqq);
1785 
1786 	if (cfqq == cfqd->active_queue)
1787 		cfqd->active_queue = NULL;
1788 
1789 	if (cfqd->active_cic) {
1790 		put_io_context(cfqd->active_cic->icq.ioc);
1791 		cfqd->active_cic = NULL;
1792 	}
1793 }
1794 
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1796 {
1797 	struct cfq_queue *cfqq = cfqd->active_queue;
1798 
1799 	if (cfqq)
1800 		__cfq_slice_expired(cfqd, cfqq, timed_out);
1801 }
1802 
1803 /*
1804  * Get next queue for service. Unless we have a queue preemption,
1805  * we'll simply select the first cfqq in the service tree.
1806  */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1808 {
1809 	struct cfq_rb_root *service_tree =
1810 		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1811 					cfqd->serving_type);
1812 
1813 	if (!cfqd->rq_queued)
1814 		return NULL;
1815 
1816 	/* There is nothing to dispatch */
1817 	if (!service_tree)
1818 		return NULL;
1819 	if (RB_EMPTY_ROOT(&service_tree->rb))
1820 		return NULL;
1821 	return cfq_rb_first(service_tree);
1822 }
1823 
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1825 {
1826 	struct cfq_group *cfqg;
1827 	struct cfq_queue *cfqq;
1828 	int i, j;
1829 	struct cfq_rb_root *st;
1830 
1831 	if (!cfqd->rq_queued)
1832 		return NULL;
1833 
1834 	cfqg = cfq_get_next_cfqg(cfqd);
1835 	if (!cfqg)
1836 		return NULL;
1837 
1838 	for_each_cfqg_st(cfqg, i, j, st)
1839 		if ((cfqq = cfq_rb_first(st)) != NULL)
1840 			return cfqq;
1841 	return NULL;
1842 }
1843 
1844 /*
1845  * Get and set a new active queue for service.
1846  */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
1849 {
1850 	if (!cfqq)
1851 		cfqq = cfq_get_next_queue(cfqd);
1852 
1853 	__cfq_set_active_queue(cfqd, cfqq);
1854 	return cfqq;
1855 }
1856 
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
1859 {
1860 	if (blk_rq_pos(rq) >= cfqd->last_position)
1861 		return blk_rq_pos(rq) - cfqd->last_position;
1862 	else
1863 		return cfqd->last_position - blk_rq_pos(rq);
1864 }
1865 
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
1868 {
1869 	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1870 }
1871 
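/*
 * Search the prio tree of cur_cfqq's priority level for a queue whose next
 * request is close to cfqd->last_position, i.e. a queue issuing I/O near
 * the current head position.
 */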
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
1874 {
1875 	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1876 	struct rb_node *parent, *node;
1877 	struct cfq_queue *__cfqq;
1878 	sector_t sector = cfqd->last_position;
1879 
1880 	if (RB_EMPTY_ROOT(root))
1881 		return NULL;
1882 
1883 	/*
1884 	 * First, if we find a request starting at the end of the last
1885 	 * request, choose it.
1886 	 */
1887 	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1888 	if (__cfqq)
1889 		return __cfqq;
1890 
1891 	/*
1892 	 * If the exact sector wasn't found, the parent of the NULL leaf
1893 	 * will contain the closest sector.
1894 	 */
1895 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
1896 	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1897 		return __cfqq;
1898 
1899 	if (blk_rq_pos(__cfqq->next_rq) < sector)
1900 		node = rb_next(&__cfqq->p_node);
1901 	else
1902 		node = rb_prev(&__cfqq->p_node);
1903 	if (!node)
1904 		return NULL;
1905 
1906 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
1907 	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1908 		return __cfqq;
1909 
1910 	return NULL;
1911 }
1912 
/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 * 	      closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
1925 {
1926 	struct cfq_queue *cfqq;
1927 
1928 	if (cfq_class_idle(cur_cfqq))
1929 		return NULL;
1930 	if (!cfq_cfqq_sync(cur_cfqq))
1931 		return NULL;
1932 	if (CFQQ_SEEKY(cur_cfqq))
1933 		return NULL;
1934 
1935 	/*
1936 	 * Don't search priority tree if it's the only queue in the group.
1937 	 */
1938 	if (cur_cfqq->cfqg->nr_cfqq == 1)
1939 		return NULL;
1940 
	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
	 */
1946 	cfqq = cfqq_close(cfqd, cur_cfqq);
1947 	if (!cfqq)
1948 		return NULL;
1949 
1950 	/* If new queue belongs to different cfq_group, don't choose it */
1951 	if (cur_cfqq->cfqg != cfqq->cfqg)
1952 		return NULL;
1953 
1954 	/*
1955 	 * It only makes sense to merge sync queues.
1956 	 */
1957 	if (!cfq_cfqq_sync(cfqq))
1958 		return NULL;
1959 	if (CFQQ_SEEKY(cfqq))
1960 		return NULL;
1961 
1962 	/*
1963 	 * Do not merge queues of different priority classes
1964 	 */
1965 	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1966 		return NULL;
1967 
1968 	return cfqq;
1969 }
1970 
1971 /*
1972  * Determine whether we should enforce idle window for this queue.
1973  */
1974 
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1976 {
1977 	enum wl_prio_t prio = cfqq_prio(cfqq);
1978 	struct cfq_rb_root *service_tree = cfqq->service_tree;
1979 
1980 	BUG_ON(!service_tree);
1981 	BUG_ON(!service_tree->count);
1982 
1983 	if (!cfqd->cfq_slice_idle)
1984 		return false;
1985 
1986 	/* We never do for idle class queues. */
1987 	if (prio == IDLE_WORKLOAD)
1988 		return false;
1989 
1990 	/* We do for queues that were marked with idle window flag. */
1991 	if (cfq_cfqq_idle_window(cfqq) &&
1992 	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1993 		return true;
1994 
1995 	/*
1996 	 * Otherwise, we do only if they are the last ones
1997 	 * in their service tree.
1998 	 */
1999 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
2000 	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
2001 		return true;
2002 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
2003 			service_tree->count);
2004 	return false;
2005 }
2006 
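/*
 * Arm the idle timer for the active queue (or its group), keeping the disk
 * idle for a short while in anticipation of another nearby request from the
 * same process.
 */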
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2008 {
2009 	struct cfq_queue *cfqq = cfqd->active_queue;
2010 	struct cfq_io_cq *cic;
2011 	unsigned long sl, group_idle = 0;
2012 
2013 	/*
2014 	 * SSD device without seek penalty, disable idling. But only do so
2015 	 * for devices that support queuing, otherwise we still have a problem
2016 	 * with sync vs async workloads.
2017 	 */
2018 	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2019 		return;
2020 
2021 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2022 	WARN_ON(cfq_cfqq_slice_new(cfqq));
2023 
2024 	/*
2025 	 * idle is disabled, either manually or by past process history
2026 	 */
2027 	if (!cfq_should_idle(cfqd, cfqq)) {
2028 		/* no queue idling. Check for group idling */
2029 		if (cfqd->cfq_group_idle)
2030 			group_idle = cfqd->cfq_group_idle;
2031 		else
2032 			return;
2033 	}
2034 
2035 	/*
2036 	 * still active requests from this queue, don't idle
2037 	 */
2038 	if (cfqq->dispatched)
2039 		return;
2040 
2041 	/*
2042 	 * task has exited, don't wait
2043 	 */
2044 	cic = cfqd->active_cic;
2045 	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
2046 		return;
2047 
2048 	/*
2049 	 * If our average think time is larger than the remaining time
2050 	 * slice, then don't idle. This avoids overrunning the allotted
2051 	 * time slice.
2052 	 */
2053 	if (sample_valid(cic->ttime.ttime_samples) &&
2054 	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2055 		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2056 			     cic->ttime.ttime_mean);
2057 		return;
2058 	}
2059 
2060 	/* There are other queues in the group, don't do group idle */
2061 	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2062 		return;
2063 
2064 	cfq_mark_cfqq_wait_request(cfqq);
2065 
2066 	if (group_idle)
2067 		sl = cfqd->cfq_group_idle;
2068 	else
2069 		sl = cfqd->cfq_slice_idle;
2070 
2071 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2072 	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
2073 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2074 			group_idle ? 1 : 0);
2075 }
2076 
2077 /*
2078  * Move request from internal lists to the request queue dispatch list.
2079  */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2081 {
2082 	struct cfq_data *cfqd = q->elevator->elevator_data;
2083 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2084 
2085 	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2086 
2087 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2088 	cfq_remove_request(rq);
2089 	cfqq->dispatched++;
2090 	(RQ_CFQG(rq))->dispatched++;
2091 	elv_dispatch_sort(q, rq);
2092 
2093 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2094 	cfqq->nr_sectors += blk_rq_sectors(rq);
2095 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
2096 					rq_data_dir(rq), rq_is_sync(rq));
2097 }
2098 
2099 /*
2100  * return expired entry, or NULL to just start from scratch in rbtree
2101  */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2103 {
2104 	struct request *rq = NULL;
2105 
2106 	if (cfq_cfqq_fifo_expire(cfqq))
2107 		return NULL;
2108 
2109 	cfq_mark_cfqq_fifo_expire(cfqq);
2110 
2111 	if (list_empty(&cfqq->fifo))
2112 		return NULL;
2113 
2114 	rq = rq_entry_fifo(cfqq->fifo.next);
2115 	if (time_before(jiffies, rq_fifo_time(rq)))
2116 		rq = NULL;
2117 
2118 	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2119 	return rq;
2120 }
2121 
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2124 {
2125 	const int base_rq = cfqd->cfq_slice_async_rq;
2126 
2127 	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2128 
2129 	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2130 }
2131 
2132 /*
2133  * Must be called with the queue_lock held.
2134  */
static int cfqq_process_refs(struct cfq_queue *cfqq)
2136 {
2137 	int process_refs, io_refs;
2138 
2139 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2140 	process_refs = cfqq->ref - io_refs;
2141 	BUG_ON(process_refs < 0);
2142 	return process_refs;
2143 }
2144 
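/*
 * Schedule cfqq and new_cfqq for merging by linking them via ->new_cfqq,
 * merging in the direction of the queue that holds more process references.
 */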
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2146 {
2147 	int process_refs, new_process_refs;
2148 	struct cfq_queue *__cfqq;
2149 
2150 	/*
2151 	 * If there are no process references on the new_cfqq, then it is
2152 	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2153 	 * chain may have dropped their last reference (not just their
2154 	 * last process reference).
2155 	 */
2156 	if (!cfqq_process_refs(new_cfqq))
2157 		return;
2158 
2159 	/* Avoid a circular list and skip interim queue merges */
2160 	while ((__cfqq = new_cfqq->new_cfqq)) {
2161 		if (__cfqq == cfqq)
2162 			return;
2163 		new_cfqq = __cfqq;
2164 	}
2165 
2166 	process_refs = cfqq_process_refs(cfqq);
2167 	new_process_refs = cfqq_process_refs(new_cfqq);
2168 	/*
2169 	 * If the process for the cfqq has gone away, there is no
2170 	 * sense in merging the queues.
2171 	 */
2172 	if (process_refs == 0 || new_process_refs == 0)
2173 		return;
2174 
2175 	/*
2176 	 * Merge in the direction of the lesser amount of work.
2177 	 */
2178 	if (new_process_refs >= process_refs) {
2179 		cfqq->new_cfqq = new_cfqq;
2180 		new_cfqq->ref += process_refs;
2181 	} else {
2182 		new_cfqq->new_cfqq = cfqq;
2183 		cfqq->ref += new_process_refs;
2184 	}
2185 }
2186 
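/*
 * Pick the workload type (sync, sync-noidle or async) to serve next within
 * the given priority class: the service tree whose first queue has the
 * lowest rb_key wins.
 */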
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
2189 {
2190 	struct cfq_queue *queue;
2191 	int i;
2192 	bool key_valid = false;
2193 	unsigned long lowest_key = 0;
2194 	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2195 
2196 	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2197 		/* select the one with lowest rb_key */
2198 		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2199 		if (queue &&
2200 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2201 			lowest_key = queue->rb_key;
2202 			cur_best = i;
2203 			key_valid = true;
2204 		}
2205 	}
2206 
2207 	return cur_best;
2208 }
2209 
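/*
 * Select the priority class (RT > BE > IDLE) and workload type to serve next
 * for this group, and compute how long that workload may run before we
 * switch again.
 */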
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2211 {
2212 	unsigned slice;
2213 	unsigned count;
2214 	struct cfq_rb_root *st;
2215 	unsigned group_slice;
2216 	enum wl_prio_t original_prio = cfqd->serving_prio;
2217 
2218 	/* Choose next priority. RT > BE > IDLE */
2219 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2220 		cfqd->serving_prio = RT_WORKLOAD;
2221 	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2222 		cfqd->serving_prio = BE_WORKLOAD;
2223 	else {
2224 		cfqd->serving_prio = IDLE_WORKLOAD;
2225 		cfqd->workload_expires = jiffies + 1;
2226 		return;
2227 	}
2228 
2229 	if (original_prio != cfqd->serving_prio)
2230 		goto new_workload;
2231 
2232 	/*
2233 	 * For RT and BE, we have to choose also the type
2234 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2235 	 * expiration time
2236 	 */
2237 	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2238 	count = st->count;
2239 
2240 	/*
2241 	 * check workload expiration, and that we still have other queues ready
2242 	 */
2243 	if (count && !time_after(jiffies, cfqd->workload_expires))
2244 		return;
2245 
2246 new_workload:
2247 	/* otherwise select new workload type */
2248 	cfqd->serving_type =
2249 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2250 	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2251 	count = st->count;
2252 
2253 	/*
2254 	 * the workload slice is computed as a fraction of target latency
2255 	 * proportional to the number of queues in that workload, over
2256 	 * all the queues in the same priority class
2257 	 */
2258 	group_slice = cfq_group_slice(cfqd, cfqg);
2259 
2260 	slice = group_slice * count /
2261 		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2262 		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2263 
2264 	if (cfqd->serving_type == ASYNC_WORKLOAD) {
2265 		unsigned int tmp;
2266 
		/*
		 * Async queues are currently system wide. Just taking
		 * the proportion of queues within the same group will lead
		 * to a higher async ratio system wide, as the root group
		 * generally has a higher weight. A more accurate approach
		 * would be to calculate the system wide async/sync ratio.
		 */
2274 		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2275 		tmp = tmp/cfqd->busy_queues;
2276 		slice = min_t(unsigned, slice, tmp);
2277 
2278 		/* async workload slice is scaled down according to
2279 		 * the sync/async slice ratio. */
2280 		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2281 	} else
2282 		/* sync workload slice is at least 2 * cfq_slice_idle */
2283 		slice = max(slice, 2 * cfqd->cfq_slice_idle);
2284 
2285 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
2286 	cfq_log(cfqd, "workload slice:%d", slice);
2287 	cfqd->workload_expires = jiffies + slice;
2288 }
2289 
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2291 {
2292 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
2293 	struct cfq_group *cfqg;
2294 
2295 	if (RB_EMPTY_ROOT(&st->rb))
2296 		return NULL;
2297 	cfqg = cfq_rb_first_group(st);
2298 	update_min_vdisktime(st);
2299 	return cfqg;
2300 }
2301 
static void cfq_choose_cfqg(struct cfq_data *cfqd)
2303 {
2304 	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2305 
2306 	cfqd->serving_group = cfqg;
2307 
2308 	/* Restore the workload type data */
2309 	if (cfqg->saved_workload_slice) {
2310 		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2311 		cfqd->serving_type = cfqg->saved_workload;
2312 		cfqd->serving_prio = cfqg->saved_serving_prio;
2313 	} else
2314 		cfqd->workload_expires = jiffies - 1;
2315 
2316 	choose_service_tree(cfqd, cfqg);
2317 }
2318 
2319 /*
2320  * Select a queue for service. If we have a current active queue,
2321  * check whether to continue servicing it, or retrieve and set a new one.
2322  */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2324 {
2325 	struct cfq_queue *cfqq, *new_cfqq = NULL;
2326 
2327 	cfqq = cfqd->active_queue;
2328 	if (!cfqq)
2329 		goto new_queue;
2330 
2331 	if (!cfqd->rq_queued)
2332 		return NULL;
2333 
2334 	/*
2335 	 * We were waiting for group to get backlogged. Expire the queue
2336 	 */
2337 	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2338 		goto expire;
2339 
2340 	/*
2341 	 * The active queue has run out of time, expire it and select new.
2342 	 */
2343 	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If the slice had not expired at the completion of the last
		 * request we might not have turned on the wait_busy flag.
		 * Don't expire the queue yet. Allow the group to get
		 * backlogged.
		 *
		 * The very fact that we have used the slice means we have
		 * been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
2353 		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2354 		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2355 			cfqq = NULL;
2356 			goto keep_queue;
2357 		} else
2358 			goto check_group_idle;
2359 	}
2360 
2361 	/*
2362 	 * The active queue has requests and isn't expired, allow it to
2363 	 * dispatch.
2364 	 */
2365 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2366 		goto keep_queue;
2367 
2368 	/*
2369 	 * If another queue has a request waiting within our mean seek
2370 	 * distance, let it run.  The expire code will check for close
2371 	 * cooperators and put the close queue at the front of the service
2372 	 * tree.  If possible, merge the expiring queue with the new cfqq.
2373 	 */
2374 	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2375 	if (new_cfqq) {
2376 		if (!cfqq->new_cfqq)
2377 			cfq_setup_merge(cfqq, new_cfqq);
2378 		goto expire;
2379 	}
2380 
2381 	/*
2382 	 * No requests pending. If the active queue still has requests in
2383 	 * flight or is idling for a new request, allow either of these
2384 	 * conditions to happen (or time out) before selecting a new queue.
2385 	 */
2386 	if (timer_pending(&cfqd->idle_slice_timer)) {
2387 		cfqq = NULL;
2388 		goto keep_queue;
2389 	}
2390 
	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver; don't idle.
	 */
2395 	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2396 	    (cfq_cfqq_slice_new(cfqq) ||
2397 	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2398 		cfq_clear_cfqq_deep(cfqq);
2399 		cfq_clear_cfqq_idle_window(cfqq);
2400 	}
2401 
2402 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2403 		cfqq = NULL;
2404 		goto keep_queue;
2405 	}
2406 
2407 	/*
2408 	 * If group idle is enabled and there are requests dispatched from
2409 	 * this group, wait for requests to complete.
2410 	 */
2411 check_group_idle:
2412 	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2413 	    cfqq->cfqg->dispatched &&
2414 	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2415 		cfqq = NULL;
2416 		goto keep_queue;
2417 	}
2418 
2419 expire:
2420 	cfq_slice_expired(cfqd, 0);
2421 new_queue:
2422 	/*
2423 	 * Current queue expired. Check if we have to switch to a new
2424 	 * service tree
2425 	 */
2426 	if (!new_cfqq)
2427 		cfq_choose_cfqg(cfqd);
2428 
2429 	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2430 keep_queue:
2431 	return cfqq;
2432 }
2433 
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2435 {
2436 	int dispatched = 0;
2437 
2438 	while (cfqq->next_rq) {
2439 		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2440 		dispatched++;
2441 	}
2442 
2443 	BUG_ON(!list_empty(&cfqq->fifo));
2444 
2445 	/* By default cfqq is not expired if it is empty. Do it explicitly */
2446 	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2447 	return dispatched;
2448 }
2449 
2450 /*
2451  * Drain our current requests. Used for barriers and when switching
2452  * io schedulers on-the-fly.
2453  */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
2455 {
2456 	struct cfq_queue *cfqq;
2457 	int dispatched = 0;
2458 
2459 	/* Expire the timeslice of the current active queue first */
2460 	cfq_slice_expired(cfqd, 0);
2461 	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2462 		__cfq_set_active_queue(cfqd, cfqq);
2463 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2464 	}
2465 
2466 	BUG_ON(cfqd->busy_queues);
2467 
2468 	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2469 	return dispatched;
2470 }
2471 
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
2474 {
2475 	/* the queue hasn't finished any request, can't estimate */
2476 	if (cfq_cfqq_slice_new(cfqq))
2477 		return true;
2478 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2479 		cfqq->slice_end))
2480 		return true;
2481 
2482 	return false;
2483 }
2484 
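/*
 * Decide whether cfqq may dispatch another request now, based on the
 * sync/async requests currently in flight and on how much of its dispatch
 * budget (derived from cfq_quantum) it has already consumed.
 */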
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2486 {
2487 	unsigned int max_dispatch;
2488 
2489 	/*
2490 	 * Drain async requests before we start sync IO
2491 	 */
2492 	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2493 		return false;
2494 
2495 	/*
2496 	 * If this is an async queue and we have sync IO in flight, let it wait
2497 	 */
2498 	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2499 		return false;
2500 
2501 	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2502 	if (cfq_class_idle(cfqq))
2503 		max_dispatch = 1;
2504 
2505 	/*
2506 	 * Does this cfqq already have too much IO in flight?
2507 	 */
2508 	if (cfqq->dispatched >= max_dispatch) {
2509 		bool promote_sync = false;
2510 		/*
2511 		 * idle queue must always only have a single IO in flight
2512 		 */
2513 		if (cfq_class_idle(cfqq))
2514 			return false;
2515 
2516 		/*
2517 		 * If there is only one sync queue
2518 		 * we can ignore async queue here and give the sync
2519 		 * queue no dispatch limit. The reason is a sync queue can
2520 		 * preempt async queue, limiting the sync queue doesn't make
2521 		 * sense. This is useful for aiostress test.
2522 		 */
2523 		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2524 			promote_sync = true;
2525 
2526 		/*
2527 		 * We have other queues, don't allow more IO from this one
2528 		 */
2529 		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2530 				!promote_sync)
2531 			return false;
2532 
2533 		/*
2534 		 * Sole queue user, no limit
2535 		 */
2536 		if (cfqd->busy_queues == 1 || promote_sync)
2537 			max_dispatch = -1;
2538 		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of the slice,
			 * subject to the upper limit of cfq_quantum.
			 */
2545 			max_dispatch = cfqd->cfq_quantum;
2546 	}
2547 
2548 	/*
2549 	 * Async queues must wait a bit before being allowed dispatch.
2550 	 * We also ramp up the dispatch depth gradually for async IO,
2551 	 * based on the last sync IO we serviced
2552 	 */
2553 	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2554 		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2555 		unsigned int depth;
2556 
2557 		depth = last_sync / cfqd->cfq_slice[1];
2558 		if (!depth && !cfqq->dispatched)
2559 			depth = 1;
2560 		if (depth < max_dispatch)
2561 			max_dispatch = depth;
2562 	}
2563 
2564 	/*
2565 	 * If we're below the current max, allow a dispatch
2566 	 */
2567 	return cfqq->dispatched < max_dispatch;
2568 }
2569 
2570 /*
2571  * Dispatch a request from cfqq, moving them to the request queue
2572  * dispatch list.
2573  */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2575 {
2576 	struct request *rq;
2577 
2578 	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2579 
2580 	if (!cfq_may_dispatch(cfqd, cfqq))
2581 		return false;
2582 
2583 	/*
2584 	 * follow expired path, else get first next available
2585 	 */
2586 	rq = cfq_check_fifo(cfqq);
2587 	if (!rq)
2588 		rq = cfqq->next_rq;
2589 
2590 	/*
2591 	 * insert request into driver dispatch list
2592 	 */
2593 	cfq_dispatch_insert(cfqd->queue, rq);
2594 
2595 	if (!cfqd->active_cic) {
2596 		struct cfq_io_cq *cic = RQ_CIC(rq);
2597 
2598 		atomic_long_inc(&cic->icq.ioc->refcount);
2599 		cfqd->active_cic = cic;
2600 	}
2601 
2602 	return true;
2603 }
2604 
2605 /*
2606  * Find the cfqq that we need to service and move a request from that to the
2607  * dispatch list
2608  */
static int cfq_dispatch_requests(struct request_queue *q, int force)
2610 {
2611 	struct cfq_data *cfqd = q->elevator->elevator_data;
2612 	struct cfq_queue *cfqq;
2613 
2614 	if (!cfqd->busy_queues)
2615 		return 0;
2616 
2617 	if (unlikely(force))
2618 		return cfq_forced_dispatch(cfqd);
2619 
2620 	cfqq = cfq_select_queue(cfqd);
2621 	if (!cfqq)
2622 		return 0;
2623 
2624 	/*
2625 	 * Dispatch a request from this cfqq, if it is allowed
2626 	 */
2627 	if (!cfq_dispatch_request(cfqd, cfqq))
2628 		return 0;
2629 
2630 	cfqq->slice_dispatch++;
2631 	cfq_clear_cfqq_must_dispatch(cfqq);
2632 
	/*
	 * Expire an async queue immediately if it has used up its slice. An
	 * idle queue always expires after one dispatch round.
	 */
2637 	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2638 	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2639 	    cfq_class_idle(cfqq))) {
2640 		cfqq->slice_end = jiffies + 1;
2641 		cfq_slice_expired(cfqd, 0);
2642 	}
2643 
2644 	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2645 	return 1;
2646 }
2647 
2648 /*
2649  * task holds one reference to the queue, dropped when task exits. each rq
2650  * in-flight on this queue also holds a reference, dropped when rq is freed.
2651  *
2652  * Each cfq queue took a reference on the parent group. Drop it now.
2653  * queue lock must be held here.
2654  */
static void cfq_put_queue(struct cfq_queue *cfqq)
2656 {
2657 	struct cfq_data *cfqd = cfqq->cfqd;
2658 	struct cfq_group *cfqg;
2659 
2660 	BUG_ON(cfqq->ref <= 0);
2661 
2662 	cfqq->ref--;
2663 	if (cfqq->ref)
2664 		return;
2665 
2666 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
2667 	BUG_ON(rb_first(&cfqq->sort_list));
2668 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2669 	cfqg = cfqq->cfqg;
2670 
2671 	if (unlikely(cfqd->active_queue == cfqq)) {
2672 		__cfq_slice_expired(cfqd, cfqq, 0);
2673 		cfq_schedule_dispatch(cfqd);
2674 	}
2675 
2676 	BUG_ON(cfq_cfqq_on_rr(cfqq));
2677 	kmem_cache_free(cfq_pool, cfqq);
2678 	cfq_put_cfqg(cfqg);
2679 }
2680 
static void cfq_put_cooperator(struct cfq_queue *cfqq)
2682 {
2683 	struct cfq_queue *__cfqq, *next;
2684 
2685 	/*
2686 	 * If this queue was scheduled to merge with another queue, be
2687 	 * sure to drop the reference taken on that queue (and others in
2688 	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2689 	 */
2690 	__cfqq = cfqq->new_cfqq;
2691 	while (__cfqq) {
2692 		if (__cfqq == cfqq) {
2693 			WARN(1, "cfqq->new_cfqq loop detected\n");
2694 			break;
2695 		}
2696 		next = __cfqq->new_cfqq;
2697 		cfq_put_queue(__cfqq);
2698 		__cfqq = next;
2699 	}
2700 }
2701 
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2703 {
2704 	if (unlikely(cfqq == cfqd->active_queue)) {
2705 		__cfq_slice_expired(cfqd, cfqq, 0);
2706 		cfq_schedule_dispatch(cfqd);
2707 	}
2708 
2709 	cfq_put_cooperator(cfqq);
2710 
2711 	cfq_put_queue(cfqq);
2712 }
2713 
static void cfq_init_icq(struct io_cq *icq)
2715 {
2716 	struct cfq_io_cq *cic = icq_to_cic(icq);
2717 
2718 	cic->ttime.last_end_request = jiffies;
2719 }
2720 
static void cfq_exit_icq(struct io_cq *icq)
2722 {
2723 	struct cfq_io_cq *cic = icq_to_cic(icq);
2724 	struct cfq_data *cfqd = cic_to_cfqd(cic);
2725 
2726 	if (cic->cfqq[BLK_RW_ASYNC]) {
2727 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2728 		cic->cfqq[BLK_RW_ASYNC] = NULL;
2729 	}
2730 
2731 	if (cic->cfqq[BLK_RW_SYNC]) {
2732 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2733 		cic->cfqq[BLK_RW_SYNC] = NULL;
2734 	}
2735 }
2736 
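/*
 * Initialize cfqq's I/O priority and class from the task's io_context,
 * falling back to the CPU scheduling settings if no priority was set.
 */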
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2738 {
2739 	struct task_struct *tsk = current;
2740 	int ioprio_class;
2741 
2742 	if (!cfq_cfqq_prio_changed(cfqq))
2743 		return;
2744 
2745 	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2746 	switch (ioprio_class) {
2747 	default:
2748 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2749 	case IOPRIO_CLASS_NONE:
2750 		/*
2751 		 * no prio set, inherit CPU scheduling settings
2752 		 */
2753 		cfqq->ioprio = task_nice_ioprio(tsk);
2754 		cfqq->ioprio_class = task_nice_ioclass(tsk);
2755 		break;
2756 	case IOPRIO_CLASS_RT:
2757 		cfqq->ioprio = task_ioprio(ioc);
2758 		cfqq->ioprio_class = IOPRIO_CLASS_RT;
2759 		break;
2760 	case IOPRIO_CLASS_BE:
2761 		cfqq->ioprio = task_ioprio(ioc);
2762 		cfqq->ioprio_class = IOPRIO_CLASS_BE;
2763 		break;
2764 	case IOPRIO_CLASS_IDLE:
2765 		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2766 		cfqq->ioprio = 7;
2767 		cfq_clear_cfqq_idle_window(cfqq);
2768 		break;
2769 	}
2770 
2771 	/*
2772 	 * keep track of original prio settings in case we have to temporarily
2773 	 * elevate the priority of this queue
2774 	 */
2775 	cfqq->org_ioprio = cfqq->ioprio;
2776 	cfq_clear_cfqq_prio_changed(cfqq);
2777 }
2778 
static void changed_ioprio(struct cfq_io_cq *cic)
2780 {
2781 	struct cfq_data *cfqd = cic_to_cfqd(cic);
2782 	struct cfq_queue *cfqq;
2783 
2784 	if (unlikely(!cfqd))
2785 		return;
2786 
2787 	cfqq = cic->cfqq[BLK_RW_ASYNC];
2788 	if (cfqq) {
2789 		struct cfq_queue *new_cfqq;
2790 		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
2791 						GFP_ATOMIC);
2792 		if (new_cfqq) {
2793 			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2794 			cfq_put_queue(cfqq);
2795 		}
2796 	}
2797 
2798 	cfqq = cic->cfqq[BLK_RW_SYNC];
2799 	if (cfqq)
2800 		cfq_mark_cfqq_prio_changed(cfqq);
2801 }
2802 
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
2805 {
2806 	RB_CLEAR_NODE(&cfqq->rb_node);
2807 	RB_CLEAR_NODE(&cfqq->p_node);
2808 	INIT_LIST_HEAD(&cfqq->fifo);
2809 
2810 	cfqq->ref = 0;
2811 	cfqq->cfqd = cfqd;
2812 
2813 	cfq_mark_cfqq_prio_changed(cfqq);
2814 
2815 	if (is_sync) {
2816 		if (!cfq_class_idle(cfqq))
2817 			cfq_mark_cfqq_idle_window(cfqq);
2818 		cfq_mark_cfqq_sync(cfqq);
2819 	}
2820 	cfqq->pid = pid;
2821 }
2822 
2823 #ifdef CONFIG_CFQ_GROUP_IOSCHED
static void changed_cgroup(struct cfq_io_cq *cic)
2825 {
2826 	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2827 	struct cfq_data *cfqd = cic_to_cfqd(cic);
2828 	struct request_queue *q;
2829 
2830 	if (unlikely(!cfqd))
2831 		return;
2832 
2833 	q = cfqd->queue;
2834 
2835 	if (sync_cfqq) {
2836 		/*
2837 		 * Drop reference to sync queue. A new sync queue will be
2838 		 * assigned in new group upon arrival of a fresh request.
2839 		 */
2840 		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2841 		cic_set_cfqq(cic, NULL, 1);
2842 		cfq_put_queue(sync_cfqq);
2843 	}
2844 }
2845 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2846 
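/*
 * Find the cfqq for this context or allocate a new one. The queue lock may
 * be dropped and re-acquired for a sleeping allocation; if allocation fails
 * we fall back to the per-device oom_cfqq.
 */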
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
2850 {
2851 	struct cfq_queue *cfqq, *new_cfqq = NULL;
2852 	struct cfq_io_cq *cic;
2853 	struct cfq_group *cfqg;
2854 
2855 retry:
2856 	cfqg = cfq_get_cfqg(cfqd);
2857 	cic = cfq_cic_lookup(cfqd, ioc);
2858 	/* cic always exists here */
2859 	cfqq = cic_to_cfqq(cic, is_sync);
2860 
2861 	/*
2862 	 * Always try a new alloc if we fell back to the OOM cfqq
2863 	 * originally, since it should just be a temporary situation.
2864 	 */
2865 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2866 		cfqq = NULL;
2867 		if (new_cfqq) {
2868 			cfqq = new_cfqq;
2869 			new_cfqq = NULL;
2870 		} else if (gfp_mask & __GFP_WAIT) {
2871 			spin_unlock_irq(cfqd->queue->queue_lock);
2872 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
2873 					gfp_mask | __GFP_ZERO,
2874 					cfqd->queue->node);
2875 			spin_lock_irq(cfqd->queue->queue_lock);
2876 			if (new_cfqq)
2877 				goto retry;
2878 		} else {
2879 			cfqq = kmem_cache_alloc_node(cfq_pool,
2880 					gfp_mask | __GFP_ZERO,
2881 					cfqd->queue->node);
2882 		}
2883 
2884 		if (cfqq) {
2885 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2886 			cfq_init_prio_data(cfqq, ioc);
2887 			cfq_link_cfqq_cfqg(cfqq, cfqg);
2888 			cfq_log_cfqq(cfqd, cfqq, "alloced");
2889 		} else
2890 			cfqq = &cfqd->oom_cfqq;
2891 	}
2892 
2893 	if (new_cfqq)
2894 		kmem_cache_free(cfq_pool, new_cfqq);
2895 
2896 	return cfqq;
2897 }
2898 
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2901 {
2902 	switch (ioprio_class) {
2903 	case IOPRIO_CLASS_RT:
2904 		return &cfqd->async_cfqq[0][ioprio];
2905 	case IOPRIO_CLASS_BE:
2906 		return &cfqd->async_cfqq[1][ioprio];
2907 	case IOPRIO_CLASS_IDLE:
2908 		return &cfqd->async_idle_cfqq;
2909 	default:
2910 		BUG();
2911 	}
2912 }
2913 
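/*
 * Get a cfqq for this io_context. Async queues are shared per priority level
 * and pinned until the scheduler exits; sync queues are per process.
 */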
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
2917 {
2918 	const int ioprio = task_ioprio(ioc);
2919 	const int ioprio_class = task_ioprio_class(ioc);
2920 	struct cfq_queue **async_cfqq = NULL;
2921 	struct cfq_queue *cfqq = NULL;
2922 
2923 	if (!is_sync) {
2924 		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2925 		cfqq = *async_cfqq;
2926 	}
2927 
2928 	if (!cfqq)
2929 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2930 
2931 	/*
2932 	 * pin the queue now that it's allocated, scheduler exit will prune it
2933 	 */
2934 	if (!is_sync && !(*async_cfqq)) {
2935 		cfqq->ref++;
2936 		*async_cfqq = cfqq;
2937 	}
2938 
2939 	cfqq->ref++;
2940 	return cfqq;
2941 }
2942 
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
2945 {
2946 	unsigned long elapsed = jiffies - ttime->last_end_request;
2947 	elapsed = min(elapsed, 2UL * slice_idle);
2948 
2949 	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2950 	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2951 	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2952 }
2953 
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
2957 {
2958 	if (cfq_cfqq_sync(cfqq)) {
2959 		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
2960 		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2961 			cfqd->cfq_slice_idle);
2962 	}
2963 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2964 	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2965 #endif
2966 }
2967 
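/*
 * Update cfqq's seek history: shift in one bit per request, set if the
 * request was far from the previous one (or, on non-rotational devices,
 * if it was small).
 */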
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
2971 {
2972 	sector_t sdist = 0;
2973 	sector_t n_sec = blk_rq_sectors(rq);
2974 	if (cfqq->last_request_pos) {
2975 		if (cfqq->last_request_pos < blk_rq_pos(rq))
2976 			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2977 		else
2978 			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2979 	}
2980 
2981 	cfqq->seek_history <<= 1;
2982 	if (blk_queue_nonrot(cfqd->queue))
2983 		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2984 	else
2985 		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
2986 }
2987 
2988 /*
2989  * Disable idle window if the process thinks too long or seeks so much that
2990  * it doesn't matter
2991  */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
2995 {
2996 	int old_idle, enable_idle;
2997 
2998 	/*
2999 	 * Don't idle for async or idle io prio class
3000 	 */
3001 	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3002 		return;
3003 
3004 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3005 
3006 	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3007 		cfq_mark_cfqq_deep(cfqq);
3008 
3009 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3010 		enable_idle = 0;
3011 	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
3012 		 !cfqd->cfq_slice_idle ||
3013 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3014 		enable_idle = 0;
3015 	else if (sample_valid(cic->ttime.ttime_samples)) {
3016 		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3017 			enable_idle = 0;
3018 		else
3019 			enable_idle = 1;
3020 	}
3021 
3022 	if (old_idle != enable_idle) {
3023 		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3024 		if (enable_idle)
3025 			cfq_mark_cfqq_idle_window(cfqq);
3026 		else
3027 			cfq_clear_cfqq_idle_window(cfqq);
3028 	}
3029 }
3030 
/*
 * Check if new_cfqq should preempt the currently active queue. Return false
 * for no (or if we aren't sure); returning true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
3038 {
3039 	struct cfq_queue *cfqq;
3040 
3041 	cfqq = cfqd->active_queue;
3042 	if (!cfqq)
3043 		return false;
3044 
3045 	if (cfq_class_idle(new_cfqq))
3046 		return false;
3047 
3048 	if (cfq_class_idle(cfqq))
3049 		return true;
3050 
3051 	/*
3052 	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3053 	 */
3054 	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3055 		return false;
3056 
3057 	/*
3058 	 * if the new request is sync, but the currently running queue is
3059 	 * not, let the sync request have priority.
3060 	 */
3061 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3062 		return true;
3063 
3064 	if (new_cfqq->cfqg != cfqq->cfqg)
3065 		return false;
3066 
3067 	if (cfq_slice_used(cfqq))
3068 		return true;
3069 
3070 	/* Allow preemption only if we are idling on sync-noidle tree */
3071 	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3072 	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3073 	    new_cfqq->service_tree->count == 2 &&
3074 	    RB_EMPTY_ROOT(&cfqq->sort_list))
3075 		return true;
3076 
3077 	/*
3078 	 * So both queues are sync. Let the new request get disk time if
3079 	 * it's a metadata request and the current queue is doing regular IO.
3080 	 */
3081 	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3082 		return true;
3083 
3084 	/*
3085 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3086 	 */
3087 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3088 		return true;
3089 
3090 	/* An idle queue should not be idle now for some reason */
3091 	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3092 		return true;
3093 
3094 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3095 		return false;
3096 
3097 	/*
3098 	 * if this request is as-good as one we would expect from the
3099 	 * current cfqq, let it preempt
3100 	 */
3101 	if (cfq_rq_close(cfqd, cfqq, rq))
3102 		return true;
3103 
3104 	return false;
3105 }
3106 
3107 /*
3108  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3109  * let it have half of its nominal slice.
3110  */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3112 {
3113 	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3114 
3115 	cfq_log_cfqq(cfqd, cfqq, "preempt");
3116 	cfq_slice_expired(cfqd, 1);
3117 
3118 	/*
3119 	 * workload type is changed, don't save slice, otherwise preempt
3120 	 * doesn't happen
3121 	 */
3122 	if (old_type != cfqq_type(cfqq))
3123 		cfqq->cfqg->saved_workload_slice = 0;
3124 
	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
3129 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
3130 
3131 	cfq_service_tree_add(cfqd, cfqq, 1);
3132 
3133 	cfqq->slice_end = 0;
3134 	cfq_mark_cfqq_slice_new(cfqq);
3135 }
3136 
3137 /*
3138  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3139  * something we should do about it
3140  */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
3144 {
3145 	struct cfq_io_cq *cic = RQ_CIC(rq);
3146 
3147 	cfqd->rq_queued++;
3148 	if (rq->cmd_flags & REQ_PRIO)
3149 		cfqq->prio_pending++;
3150 
3151 	cfq_update_io_thinktime(cfqd, cfqq, cic);
3152 	cfq_update_io_seektime(cfqd, cfqq, rq);
3153 	cfq_update_idle_window(cfqd, cfqq, cic);
3154 
3155 	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3156 
3157 	if (cfqq == cfqd->active_queue) {
3158 		/*
3159 		 * Remember that we saw a request from this process, but
3160 		 * don't start queuing just yet. Otherwise we risk seeing lots
3161 		 * of tiny requests, because we disrupt the normal plugging
3162 		 * and merging. If the request is already larger than a single
3163 		 * page, let it rip immediately. For that case we assume that
3164 		 * merging is already done. Ditto for a busy system that
3165 		 * has other work pending, don't risk delaying until the
3166 		 * idle timer unplug to continue working.
3167 		 */
3168 		if (cfq_cfqq_wait_request(cfqq)) {
3169 			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3170 			    cfqd->busy_queues > 1) {
3171 				cfq_del_timer(cfqd, cfqq);
3172 				cfq_clear_cfqq_wait_request(cfqq);
3173 				__blk_run_queue(cfqd->queue);
3174 			} else {
3175 				cfq_blkiocg_update_idle_time_stats(
3176 						&cfqq->cfqg->blkg);
3177 				cfq_mark_cfqq_must_dispatch(cfqq);
3178 			}
3179 		}
3180 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * Not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or this new queue
		 * has some old slice time left and is of higher priority, or
		 * this new queue is RT and the current one is BE.
		 */
3187 		cfq_preempt_queue(cfqd, cfqq);
3188 		__blk_run_queue(cfqd->queue);
3189 	}
3190 }
3191 
static void cfq_insert_request(struct request_queue *q, struct request *rq)
3193 {
3194 	struct cfq_data *cfqd = q->elevator->elevator_data;
3195 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3196 
3197 	cfq_log_cfqq(cfqd, cfqq, "insert_request");
3198 	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
3199 
3200 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3201 	list_add_tail(&rq->queuelist, &cfqq->fifo);
3202 	cfq_add_rq_rb(rq);
3203 	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3204 			&cfqd->serving_group->blkg, rq_data_dir(rq),
3205 			rq_is_sync(rq));
3206 	cfq_rq_enqueued(cfqd, cfqq, rq);
3207 }
3208 
3209 /*
3210  * Update hw_tag based on peak queue depth over 50 samples under
3211  * sufficient load.
3212  */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
3214 {
3215 	struct cfq_queue *cfqq = cfqd->active_queue;
3216 
3217 	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3218 		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3219 
3220 	if (cfqd->hw_tag == 1)
3221 		return;
3222 
3223 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3224 	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3225 		return;
3226 
	/*
	 * If the active queue doesn't have enough requests and can idle, cfq
	 * might not dispatch sufficient requests to hardware. Don't zero
	 * hw_tag in this case.
	 */
3232 	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3233 	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3234 	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3235 		return;
3236 
3237 	if (cfqd->hw_tag_samples++ < 50)
3238 		return;
3239 
3240 	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3241 		cfqd->hw_tag = 1;
3242 	else
3243 		cfqd->hw_tag = 0;
3244 }
3245 
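/*
 * Decide whether to keep the queue marked busy for a little while after its
 * last request completed, so the group can get backlogged again instead of
 * losing its share when the slice ends.
 */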
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3247 {
3248 	struct cfq_io_cq *cic = cfqd->active_cic;
3249 
3250 	/* If the queue already has requests, don't wait */
3251 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3252 		return false;
3253 
3254 	/* If there are other queues in the group, don't wait */
3255 	if (cfqq->cfqg->nr_cfqq > 1)
3256 		return false;
3257 
3258 	/* the only queue in the group, but think time is big */
3259 	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3260 		return false;
3261 
3262 	if (cfq_slice_used(cfqq))
3263 		return true;
3264 
3265 	/* if slice left is less than think time, wait busy */
3266 	if (cic && sample_valid(cic->ttime.ttime_samples)
3267 	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3268 		return true;
3269 
	/*
	 * If the think time is less than a jiffy, then ttime_mean=0 and the
	 * above will not be true. It might happen that the slice has not
	 * expired yet but will expire soon (4-5 ns) during select_queue().
	 * To cover the case where think time is less than a jiffy, mark the
	 * queue wait busy if only 1 jiffy is left in the slice.
	 */
3277 	if (cfqq->slice_end - jiffies == 1)
3278 		return true;
3279 
3280 	return false;
3281 }
3282 
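/*
 * A request has completed. Update in-flight counters and think time
 * statistics, then decide whether to expire the active queue, let it
 * wait busy, or arm the idle timer.
 */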
static void cfq_completed_request(struct request_queue *q, struct request *rq)
3284 {
3285 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3286 	struct cfq_data *cfqd = cfqq->cfqd;
3287 	const int sync = rq_is_sync(rq);
3288 	unsigned long now;
3289 
3290 	now = jiffies;
3291 	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3292 		     !!(rq->cmd_flags & REQ_NOIDLE));
3293 
3294 	cfq_update_hw_tag(cfqd);
3295 
3296 	WARN_ON(!cfqd->rq_in_driver);
3297 	WARN_ON(!cfqq->dispatched);
3298 	cfqd->rq_in_driver--;
3299 	cfqq->dispatched--;
3300 	(RQ_CFQG(rq))->dispatched--;
3301 	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3302 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3303 			rq_data_dir(rq), rq_is_sync(rq));
3304 
3305 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3306 
3307 	if (sync) {
3308 		struct cfq_rb_root *service_tree;
3309 
3310 		RQ_CIC(rq)->ttime.last_end_request = now;
3311 
3312 		if (cfq_cfqq_on_rr(cfqq))
3313 			service_tree = cfqq->service_tree;
3314 		else
3315 			service_tree = service_tree_for(cfqq->cfqg,
3316 				cfqq_prio(cfqq), cfqq_type(cfqq));
3317 		service_tree->ttime.last_end_request = now;
3318 		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3319 			cfqd->last_delayed_sync = now;
3320 	}
3321 
3322 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3323 	cfqq->cfqg->ttime.last_end_request = now;
3324 #endif
3325 
3326 	/*
3327 	 * If this is the active queue, check if it needs to be expired,
3328 	 * or if we want to idle in case it has no pending requests.
3329 	 */
3330 	if (cfqd->active_queue == cfqq) {
3331 		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3332 
3333 		if (cfq_cfqq_slice_new(cfqq)) {
3334 			cfq_set_prio_slice(cfqd, cfqq);
3335 			cfq_clear_cfqq_slice_new(cfqq);
3336 		}
3337 
		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
3342 		if (cfq_should_wait_busy(cfqd, cfqq)) {
3343 			unsigned long extend_sl = cfqd->cfq_slice_idle;
3344 			if (!cfqd->cfq_slice_idle)
3345 				extend_sl = cfqd->cfq_group_idle;
3346 			cfqq->slice_end = jiffies + extend_sl;
3347 			cfq_mark_cfqq_wait_busy(cfqq);
3348 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3349 		}
3350 
3351 		/*
3352 		 * Idling is not enabled on:
3353 		 * - expired queues
3354 		 * - idle-priority queues
3355 		 * - async queues
3356 		 * - queues with still some requests queued
3357 		 * - when there is a close cooperator
3358 		 */
3359 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3360 			cfq_slice_expired(cfqd, 1);
3361 		else if (sync && cfqq_empty &&
3362 			 !cfq_close_cooperator(cfqd, cfqq)) {
3363 			cfq_arm_slice_timer(cfqd);
3364 		}
3365 	}
3366 
3367 	if (!cfqd->rq_in_driver)
3368 		cfq_schedule_dispatch(cfqd);
3369 }
3370 
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3372 {
3373 	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3374 		cfq_mark_cfqq_must_alloc_slice(cfqq);
3375 		return ELV_MQUEUE_MUST;
3376 	}
3377 
3378 	return ELV_MQUEUE_MAY;
3379 }
3380 
static int cfq_may_queue(struct request_queue *q, int rw)
3382 {
3383 	struct cfq_data *cfqd = q->elevator->elevator_data;
3384 	struct task_struct *tsk = current;
3385 	struct cfq_io_cq *cic;
3386 	struct cfq_queue *cfqq;
3387 
3388 	/*
3389 	 * don't force setup of a queue from here, as a call to may_queue
3390 	 * does not necessarily imply that a request actually will be queued.
3391 	 * so just lookup a possibly existing queue, or return 'may queue'
3392 	 * if that fails
3393 	 */
3394 	cic = cfq_cic_lookup(cfqd, tsk->io_context);
3395 	if (!cic)
3396 		return ELV_MQUEUE_MAY;
3397 
3398 	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3399 	if (cfqq) {
3400 		cfq_init_prio_data(cfqq, cic->icq.ioc);
3401 
3402 		return __cfq_may_queue(cfqq);
3403 	}
3404 
3405 	return ELV_MQUEUE_MAY;
3406 }
3407 
3408 /*
3409  * queue lock held here
3410  */
static void cfq_put_request(struct request *rq)
3412 {
3413 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3414 
3415 	if (cfqq) {
3416 		const int rw = rq_data_dir(rq);
3417 
3418 		BUG_ON(!cfqq->allocated[rw]);
3419 		cfqq->allocated[rw]--;
3420 
3421 		/* Put down rq reference on cfqg */
3422 		cfq_put_cfqg(RQ_CFQG(rq));
3423 		rq->elv.priv[0] = NULL;
3424 		rq->elv.priv[1] = NULL;
3425 
3426 		cfq_put_queue(cfqq);
3427 	}
3428 }
3429 
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
3433 {
3434 	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3435 	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3436 	cfq_mark_cfqq_coop(cfqq->new_cfqq);
3437 	cfq_put_queue(cfqq);
3438 	return cic_to_cfqq(cic, 1);
3439 }
3440 
3441 /*
3442  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3443  * was the last process referring to said cfqq.
3444  */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3447 {
3448 	if (cfqq_process_refs(cfqq) == 1) {
3449 		cfqq->pid = current->pid;
3450 		cfq_clear_cfqq_coop(cfqq);
3451 		cfq_clear_cfqq_split_coop(cfqq);
3452 		return cfqq;
3453 	}
3454 
3455 	cic_set_cfqq(cic, NULL, 1);
3456 
3457 	cfq_put_cooperator(cfqq);
3458 
3459 	cfq_put_queue(cfqq);
3460 	return NULL;
3461 }
3462 /*
3463  * Allocate cfq data structures associated with this request.
3464  */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3467 {
3468 	struct cfq_data *cfqd = q->elevator->elevator_data;
3469 	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3470 	const int rw = rq_data_dir(rq);
3471 	const bool is_sync = rq_is_sync(rq);
3472 	struct cfq_queue *cfqq;
3473 	unsigned int changed;
3474 
3475 	might_sleep_if(gfp_mask & __GFP_WAIT);
3476 
3477 	spin_lock_irq(q->queue_lock);
3478 
3479 	/* handle changed notifications */
3480 	changed = icq_get_changed(&cic->icq);
3481 	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
3482 		changed_ioprio(cic);
3483 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3484 	if (unlikely(changed & ICQ_CGROUP_CHANGED))
3485 		changed_cgroup(cic);
3486 #endif
3487 
3488 new_queue:
3489 	cfqq = cic_to_cfqq(cic, is_sync);
3490 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3491 		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
3492 		cic_set_cfqq(cic, cfqq, is_sync);
3493 	} else {
3494 		/*
3495 		 * If the queue was seeky for too long, break it apart.
3496 		 */
3497 		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3498 			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3499 			cfqq = split_cfqq(cic, cfqq);
3500 			if (!cfqq)
3501 				goto new_queue;
3502 		}
3503 
3504 		/*
3505 		 * Check to see if this queue is scheduled to merge with
3506 		 * another, closely cooperating queue.  The merging of
3507 		 * queues happens here as it must be done in process context.
3508 		 * The reference on new_cfqq was taken in merge_cfqqs.
3509 		 */
3510 		if (cfqq->new_cfqq)
3511 			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3512 	}
3513 
3514 	cfqq->allocated[rw]++;
3515 
3516 	cfqq->ref++;
3517 	rq->elv.priv[0] = cfqq;
3518 	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
3519 	spin_unlock_irq(q->queue_lock);
3520 	return 0;
3521 }
3522 
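/*
 * Work handler queued via cfq_schedule_dispatch(); re-runs the request
 * queue from process context with the queue lock held.
 */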
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

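/*
 * Stop the idle slice timer and flush any pending unplug work. Called
 * both before and after the queue teardown in cfq_exit_queue().
 */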
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

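/*
 * Drop the elevator's references to the shared per-priority async
 * queues and to the idle class async queue.
 */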
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;
	bool wait = false;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);
	cfq_release_cfq_groups(cfqd);

	/*
	 * If there are groups which we could not unlink from the blkcg list,
	 * wait for an rcu period for them to be freed.
	 */
	if (cfqd->nr_blkcg_linked_grps)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	/*
	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other unlinked groups out
	 * there. This can happen if the cgroup deletion path claimed the
	 * responsibility of cleaning up a group before the queue cleanup code
	 * gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* Free up per cpu stats for root group */
	free_percpu(cfqd->root_group.blkg.stats_cpu);
#endif
	kfree(cfqd);
}

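/*
 * Allocate and set up the per-queue scheduler data. Returns the new
 * cfq_data on success (stored by the elevator core as elevator_data),
 * or NULL if allocation fails.
 */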
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int i, j;
	struct cfq_group *cfqg;
	struct cfq_rb_root *st;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group */
	cfqg = &cfqd->root_group;
	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/* Give preference to root group over other groups */
	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on cfqd->cfqg_list are being deleted during queue exit.
	 * Other reference will remain there as we don't want to delete this
	 * group as it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	cfqg->ref = 2;

	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
		kfree(cfqg);
		kfree(cfqd);
		return NULL;
	}

	rcu_read_lock();

	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
					(void *)cfqd, 0);
	rcu_read_unlock();
	cfqd->nr_blkcg_linked_grps++;

	/* Add group on cfqd->cfqg_list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
#endif
	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return cfqd;
}

/*
 * sysfs parts below -->
 */
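/*
 * These tunables are exposed as files under the elevator's sysfs
 * directory, /sys/block/<dev>/queue/iosched/. For example (the device
 * name is only illustrative):
 *
 *	cat /sys/block/sda/queue/iosched/slice_idle
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Attributes built with a conversion flag are read and written in
 * milliseconds and kept internally in jiffies.
 */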
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION
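/*
 * The store helpers above clamp each written value to its [MIN, MAX]
 * range rather than rejecting out-of-range input.
 */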

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
};
#else
static struct blkio_policy_type blkio_policy_cfq;
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret) {
		kmem_cache_destroy(cfq_pool);
		return ret;
	}

	blkio_policy_register(&blkio_policy_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	blkio_policy_unregister(&blkio_policy_cfq);
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");