1 // SPDX-License-Identifier: GPL-2.0-or-later
16 * BFQ is a proportional-share I/O scheduler, with some extra
17 * low-latency capabilities. BFQ also supports full hierarchical
20 * limitations can be found in Documentation/block/bfq-iosched.rst.
22 * BFQ is a proportional-share storage-I/O scheduling algorithm based
23 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
25 * time slices. The device is not granted to the in-service process
31 * B-WF2Q+, to schedule processes according to their budgets. More
33 * process/queue is assigned a user-configurable weight, and B-WF2Q+
36 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
37 * processes issuing sequential requests (to boost the throughput),
38 * and yet guarantee a low latency to interactive and soft real-time
41 * In particular, to provide these low-latency guarantees, BFQ
42 * explicitly privileges the I/O of two classes of time-sensitive
43 * applications: interactive and soft real-time. In more detail, BFQ
50 * real-time application. For brevity, in these cases, the queue is
51 * said to be interactive or soft real-time. In both cases, BFQ
52 * privileges the service of the queue, over that of non-interactive
53 * and non-soft-real-time queues. This privileging is performed,
55 * call just weight-raising periods the time periods during which a
56 * queue is privileged, because deemed interactive or soft real-time.
58 * The detection of soft real-time queues/applications is described in
70 * non-empty queue stops being deemed interactive. Since a queue is
71 * weight-raised while it is deemed interactive, this maximum time
73 * weight-raising for interactive queues.
76 * preserving both a low latency and a high throughput on NCQ-capable,
77 * rotational or flash-based devices, and to get the job done quickly
78 * for applications consisting of many I/O-bound processes.
81 * the maximum-possible throughput at all times, then do switch off
82 * all low-latency heuristics for that device, by setting low_latency
91 * ones that guarantee a low latency to interactive and soft real-time
92 * applications, and a hierarchical extension based on H-WF2Q+.
94 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
95 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
101 * Technologies (MST-2015), May 2015.
102 * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
105 * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
108 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
110 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
114 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
126 #include <linux/backing-dev.h>
129 #include "blk-mq.h"
130 #include "blk-mq-tag.h"
131 #include "blk-mq-sched.h"
132 #include "bfq-iosched.h"
133 #include "blk-wbt.h"
138 __set_bit(BFQQF_##name, &(bfqq)->flags); \
142 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
146 return test_bit(BFQQF_##name, &(bfqq)->flags); \
192 * The current value of this parameter is the result of a tuning with
198 * - when the group does writes, w.r.t. when it does reads;
199 * - when other groups do reads, w.r.t. when they do writes.
207 * Time limit for merging (see comments in bfq_setup_cooperator). Set
212 * As can be deduced from the low time limit below, queue merging, if
234 (!blk_queue_nonrot(bfqd->queue) || \
237 #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
239 * Sync random I/O is likely to be confused with soft real-time I/O,
243 * as soft real-time.
245 #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
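/*
 * Illustrative userspace sketch (not the scheduler's code) of the
 * seek-history bitmap consulted by the two macros above: one bit per
 * observed request, shifted in at the LSB and set when the request was
 * seeky. A queue counts as seeky when most tracked requests were seeky,
 * and as totally seeky when all 32 of them were (history == -1). The
 * threshold of 19 mirrors BFQQ_SEEKY; everything else is assumed.
 */
#include <stdbool.h>
#include <stdint.h>

static void record_seekiness(uint32_t *seek_history, bool was_seeky)
{
	*seek_history = (*seek_history << 1) | (was_seeky ? 1 : 0);
}

static bool queue_is_seeky(uint32_t seek_history)
{
	return __builtin_popcount(seek_history) > 19;	/* like hweight32() */
}

static bool queue_is_totally_seeky(uint32_t seek_history)
{
	return seek_history == UINT32_MAX;	/* all bits set, i.e. == -1 */
}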
247 /* Min number of samples required to perform peak-rate update */
249 /* Min observation time interval required to perform a peak-rate update (ns) */
251 /* Target observation time interval for a peak-rate update (ns) */
255 * Shift used for peak-rate fixed precision calculations.
257 * - the current shift: 16 positions
258 * - the current type used to store rate: u32
259 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
262 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
263 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
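/*
 * Minimal userspace sketch, under the conventions described above, of
 * how a rate in sectors/usec fits in a u32 with 16 fractional bits.
 * The helper names and the sample value are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define RATE_SHIFT 16	/* plays the role of BFQ_RATE_SHIFT */

static uint32_t rate_to_fixed(double sectors_per_usec)
{
	return (uint32_t)(sectors_per_usec * (1u << RATE_SHIFT));
}

static double rate_from_fixed(uint32_t fixed)
{
	return (double)fixed / (1u << RATE_SHIFT);
}

int main(void)
{
	uint32_t r = rate_to_fixed(32.5);	/* ~32.5 sectors/usec */

	printf("fixed-point: %u, back to float: %.4f\n", r, rate_from_fixed(r));
	return 0;
}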
271 * When configured for computing the duration of the weight-raising
288 * depending on whether the device is rotational or non-rotational.
293 * non-rotational device. The reference rates are not the actual peak
296 * peak-rate estimator tends to yield slightly lower values than the
301 * The reference peak rates are measured in sectors/usec, left-shifted
313 * BFQ uses the above-detailed, time-based weight-raising mechanism to
315 * following false positives: I/O-bound applications that will go on
318 * weight-raised at the beginning of their I/O. On the opposite end,
319 * while being weight-raised, these applications
323 * in loss of device throughput with most flash-based storage, and may
328 * finish explaining how the duration of weight-raising for
337 * reference interactive task is the start-up of LibreOffice Writer,
342 * duration of weight-raising for at least one class of I/O-bound
343 * applications: those doing sequential or quasi-sequential I/O. An
344 * example is file copy. In fact, once started, the main I/O-bound
347 * is starting, because these I/O-bound processes will greedily devote
349 * throughput-friendly I/O operations. This is even more true if BFQ
353 * have no right to be weight-raised any longer.
355 * Based on the last consideration, BFQ ends weight-raising for a
360 * This early ending of weight-raising reduces the amount of time
366 #define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
367 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
371 return bic->bfqq[is_sync]; in bic_to_bfqq()
376 bic->bfqq[is_sync] = bfqq; in bic_set_bfqq()
381 return bic->icq.q->elevator->elevator_data; in bic_to_bfqd()
385 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
390 /* bic->icq is the first member, %NULL will convert to %NULL */ in icq_to_bic()
395 * bfq_bic_lookup - search @ioc for a bic associated with @bfqd.
408 spin_lock_irqsave(&q->queue_lock, flags); in bfq_bic_lookup()
410 spin_unlock_irqrestore(&q->queue_lock, flags); in bfq_bic_lookup()
424 if (bfqd->queued != 0) { in bfq_schedule_dispatch()
426 blk_mq_run_hw_queues(bfqd->queue, true); in bfq_schedule_dispatch()
430 #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
435 * Lifted from AS - choose which of rq1 and rq2 is best served now.
459 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) in bfq_choose_req()
461 else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) in bfq_choose_req()
470 back_max = bfqd->bfq_back_max * 2; in bfq_choose_req()
478 d1 = s1 - last; in bfq_choose_req()
480 d1 = (last - s1) * bfqd->bfq_back_penalty; in bfq_choose_req()
485 d2 = s2 - last; in bfq_choose_req()
487 d2 = (last - s2) * bfqd->bfq_back_penalty; in bfq_choose_req()
495 * check two variables for all permutations: --> faster! in bfq_choose_req()
518 * (--> only *one* back seek required), in bfq_choose_req()
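/*
 * Userspace sketch of the distance metric computed above: a forward
 * seek costs its plain distance, a backward seek within back_max costs
 * the distance multiplied by back_penalty, and anything farther behind
 * is treated as unreachable. Parameter names are placeholders, not the
 * scheduler's tunables.
 */
#include <stdint.h>

static uint64_t seek_cost(uint64_t pos, uint64_t last,
			  uint64_t back_max, unsigned int back_penalty)
{
	if (pos >= last)
		return pos - last;			/* forward seek */
	if (last - pos <= back_max)
		return (last - pos) * back_penalty;	/* penalized back seek */
	return UINT64_MAX;				/* too far behind */
}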
532 * Limit depths of async I/O and sync writes so as to counter both
537 struct bfq_data *bfqd = data->q->elevator->elevator_data; in bfq_limit_depth()
542 data->shallow_depth = in bfq_limit_depth()
543 bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; in bfq_limit_depth()
546 __func__, bfqd->wr_busy_queues, op_is_sync(op), in bfq_limit_depth()
547 data->shallow_depth); in bfq_limit_depth()
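/*
 * Sketch of the lookup performed above: shallow depths come from a 2x2
 * table indexed by "is any weight-raised queue busy?" and "is the
 * request synchronous?". The numeric values below are invented for
 * illustration; only the indexing scheme mirrors the code.
 */
static unsigned int pick_shallow_depth(int wr_busy_queues, int is_sync)
{
	static const unsigned int word_depths[2][2] = {
		{ 16, 64 },	/* no wr queue busy: { async, sync } */
		{  4, 32 },	/* some wr queue busy: { async, sync } */
	};

	return word_depths[!!wr_busy_queues][!!is_sync];
}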
559 p = &root->rb_node; in bfq_rq_pos_tree_lookup()
570 if (sector > blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
571 n = &(*p)->rb_right; in bfq_rq_pos_tree_lookup()
572 else if (sector < blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
573 n = &(*p)->rb_left; in bfq_rq_pos_tree_lookup()
586 bfqq ? bfqq->pid : 0); in bfq_rq_pos_tree_lookup()
593 return bfqq->service_from_backlogged > 0 && in bfq_too_late_for_merging()
594 time_is_before_jiffies(bfqq->first_IO_time + in bfq_too_late_for_merging()
612 if (bfqq->pos_root) { in bfq_pos_tree_add_move()
613 rb_erase(&bfqq->pos_node, bfqq->pos_root); in bfq_pos_tree_add_move()
614 bfqq->pos_root = NULL; in bfq_pos_tree_add_move()
618 if (bfqq == &bfqd->oom_bfqq) in bfq_pos_tree_add_move()
631 if (!bfqq->next_rq) in bfq_pos_tree_add_move()
634 bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; in bfq_pos_tree_add_move()
635 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, in bfq_pos_tree_add_move()
636 blk_rq_pos(bfqq->next_rq), &parent, &p); in bfq_pos_tree_add_move()
638 rb_link_node(&bfqq->pos_node, parent, p); in bfq_pos_tree_add_move()
639 rb_insert_color(&bfqq->pos_node, bfqq->pos_root); in bfq_pos_tree_add_move()
641 bfqq->pos_root = NULL; in bfq_pos_tree_add_move()
654 * of this function is used to check whether I/O-dispatch plugging can
659 * 2) all active queues belong to the same I/O-priority class,
666 * the last two symmetry sub-conditions above would be quite complex
668 * only the following stronger three sub-conditions, for which it is
671 * 2) all active queues belong to the same I/O-priority class,
681 bfqq->weight_counter && in bfq_asymmetric_scenario()
682 bfqq->weight_counter == in bfq_asymmetric_scenario()
684 rb_first_cached(&bfqd->queue_weights_tree), in bfq_asymmetric_scenario()
693 !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) && in bfq_asymmetric_scenario()
694 (bfqd->queue_weights_tree.rb_root.rb_node->rb_left || in bfq_asymmetric_scenario()
695 bfqd->queue_weights_tree.rb_root.rb_node->rb_right); in bfq_asymmetric_scenario()
698 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || in bfq_asymmetric_scenario()
699 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || in bfq_asymmetric_scenario()
700 (bfqd->busy_queues[1] && bfqd->busy_queues[2]); in bfq_asymmetric_scenario()
704 || bfqd->num_groups_with_pending_reqs > 0 in bfq_asymmetric_scenario()
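/*
 * Simplified sketch of the symmetry test above: the scenario counts as
 * asymmetric if queue weights differ, if busy queues span more than one
 * I/O-priority class, or if some group has pending requests (the real
 * check also depends on CONFIG_BFQ_GROUP_IOSCHED). Inputs are plain
 * counters and booleans standing in for the scheduler's state.
 */
#include <stdbool.h>

static bool asymmetric_scenario(bool varied_queue_weights,
				unsigned int busy_rt, unsigned int busy_be,
				unsigned int busy_idle,
				unsigned int groups_with_pending_reqs)
{
	bool multiple_classes_busy = (busy_rt && busy_be) ||
				     (busy_rt && busy_idle) ||
				     (busy_be && busy_idle);

	return varied_queue_weights || multiple_classes_busy ||
	       groups_with_pending_reqs > 0;
}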
710 * If the weight-counter tree passed as input contains no counter for
714 * Note that weight-counter trees contain few nodes in mostly symmetric
716 * weight-counter tree for the queues may contain at most one node.
717 * This holds even if low_latency is on, because weight-raised queues
725 struct bfq_entity *entity = &bfqq->entity; in bfq_weights_tree_add()
726 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; in bfq_weights_tree_add()
733 * non-weight-raised, and hence change its weight, and in bfq_weights_tree_add()
741 if (bfqq->weight_counter) in bfq_weights_tree_add()
750 if (entity->weight == __counter->weight) { in bfq_weights_tree_add()
751 bfqq->weight_counter = __counter; in bfq_weights_tree_add()
754 if (entity->weight < __counter->weight) in bfq_weights_tree_add()
755 new = &((*new)->rb_left); in bfq_weights_tree_add()
757 new = &((*new)->rb_right); in bfq_weights_tree_add()
762 bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), in bfq_weights_tree_add()
775 * if !bfqq->weight_counter. in bfq_weights_tree_add()
777 if (unlikely(!bfqq->weight_counter)) in bfq_weights_tree_add()
780 bfqq->weight_counter->weight = entity->weight; in bfq_weights_tree_add()
781 rb_link_node(&bfqq->weight_counter->weights_node, parent, new); in bfq_weights_tree_add()
782 rb_insert_color_cached(&bfqq->weight_counter->weights_node, root, in bfq_weights_tree_add()
786 bfqq->weight_counter->num_active++; in bfq_weights_tree_add()
787 bfqq->ref++; in bfq_weights_tree_add()
800 if (!bfqq->weight_counter) in __bfq_weights_tree_remove()
803 bfqq->weight_counter->num_active--; in __bfq_weights_tree_remove()
804 if (bfqq->weight_counter->num_active > 0) in __bfq_weights_tree_remove()
807 rb_erase_cached(&bfqq->weight_counter->weights_node, root); in __bfq_weights_tree_remove()
808 kfree(bfqq->weight_counter); in __bfq_weights_tree_remove()
811 bfqq->weight_counter = NULL; in __bfq_weights_tree_remove()
822 struct bfq_entity *entity = bfqq->entity.parent; in bfq_weights_tree_remove()
825 struct bfq_sched_data *sd = entity->my_sched_data; in bfq_weights_tree_remove()
827 if (sd->next_in_service || sd->in_service_entity) { in bfq_weights_tree_remove()
852 if (entity->in_groups_with_pending_reqs) { in bfq_weights_tree_remove()
853 entity->in_groups_with_pending_reqs = false; in bfq_weights_tree_remove()
854 bfqd->num_groups_with_pending_reqs--; in bfq_weights_tree_remove()
865 &bfqd->queue_weights_tree); in bfq_weights_tree_remove()
881 rq = rq_entry_fifo(bfqq->fifo.next); in bfq_check_fifo()
883 if (rq == last || ktime_get_ns() < rq->fifo_time) in bfq_check_fifo()
886 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); in bfq_check_fifo()
894 struct rb_node *rbnext = rb_next(&last->rb_node); in bfq_find_next_rq()
895 struct rb_node *rbprev = rb_prev(&last->rb_node); in bfq_find_next_rq()
909 rbnext = rb_first(&bfqq->sort_list); in bfq_find_next_rq()
910 if (rbnext && rbnext != &last->rb_node) in bfq_find_next_rq()
921 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || in bfq_serv_to_charge()
922 bfq_asymmetric_scenario(bfqq->bfqd, bfqq)) in bfq_serv_to_charge()
929 * bfq_updated_next_req - update the queue after a new next_rq selection.
942 struct bfq_entity *entity = &bfqq->entity; in bfq_updated_next_req()
943 struct request *next_rq = bfqq->next_rq; in bfq_updated_next_req()
949 if (bfqq == bfqd->in_service_queue) in bfq_updated_next_req()
957 max_t(unsigned long, bfqq->max_budget, in bfq_updated_next_req()
959 entity->service); in bfq_updated_next_req()
960 if (entity->budget != new_budget) { in bfq_updated_next_req()
961 entity->budget = new_budget; in bfq_updated_next_req()
972 if (bfqd->bfq_wr_max_time > 0) in bfq_wr_duration()
973 return bfqd->bfq_wr_max_time; in bfq_wr_duration()
975 dur = bfqd->rate_dur_prod; in bfq_wr_duration()
976 do_div(dur, bfqd->peak_rate); in bfq_wr_duration()
979 * Limit duration between 3 and 25 seconds. The upper limit in bfq_wr_duration()
982 * - running in a slow PC in bfq_wr_duration()
983 * - with a virtual disk stacked on a slow low-end 5400rpm HDD in bfq_wr_duration()
984 * - serving a heavy I/O workload, such as the sequential reading in bfq_wr_duration()
986 * mplayer took 23 seconds to start, if constantly weight-raised. in bfq_wr_duration()
991 * responsiveness by allowing non-interactive applications to in bfq_wr_duration()
996 * before weight-raising finishes. in bfq_wr_duration()
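/*
 * Userspace sketch of the computation above: the interactive
 * weight-raising duration is a reference product divided by the
 * estimated peak rate, clamped to the [3, 25] second range discussed in
 * the comments. rate_dur_prod and peak_rate are opaque placeholders,
 * scaled so that the quotient comes out in milliseconds; peak_rate is
 * assumed non-zero (the scheduler clamps its estimate to at least 1).
 */
#include <stdint.h>

static uint64_t wr_duration_msecs(uint64_t rate_dur_prod, uint32_t peak_rate)
{
	uint64_t dur = rate_dur_prod / peak_rate;

	if (dur > 25ULL * 1000)			/* upper limit: 25 s */
		dur = 25ULL * 1000;
	else if (dur < 3ULL * 1000)		/* lower limit: 3 s */
		dur = 3ULL * 1000;
	return dur;
}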
1001 /* switch back from soft real-time to interactive weight raising */
1005 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in switch_back_to_interactive_wr()
1006 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in switch_back_to_interactive_wr()
1007 bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; in switch_back_to_interactive_wr()
1014 unsigned int old_wr_coeff = bfqq->wr_coeff; in bfq_bfqq_resume_state()
1017 if (bic->saved_has_short_ttime) in bfq_bfqq_resume_state()
1022 if (bic->saved_IO_bound) in bfq_bfqq_resume_state()
1027 bfqq->entity.new_weight = bic->saved_weight; in bfq_bfqq_resume_state()
1028 bfqq->ttime = bic->saved_ttime; in bfq_bfqq_resume_state()
1029 bfqq->wr_coeff = bic->saved_wr_coeff; in bfq_bfqq_resume_state()
1030 bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; in bfq_bfqq_resume_state()
1031 bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; in bfq_bfqq_resume_state()
1032 bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; in bfq_bfqq_resume_state()
1034 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || in bfq_bfqq_resume_state()
1035 time_is_before_jiffies(bfqq->last_wr_start_finish + in bfq_bfqq_resume_state()
1036 bfqq->wr_cur_max_time))) { in bfq_bfqq_resume_state()
1037 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_bfqq_resume_state()
1039 time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + in bfq_bfqq_resume_state()
1043 bfqq->wr_coeff = 1; in bfq_bfqq_resume_state()
1044 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_bfqq_resume_state()
1050 bfqq->entity.prio_changed = 1; in bfq_bfqq_resume_state()
1055 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) in bfq_bfqq_resume_state()
1056 bfqd->wr_busy_queues++; in bfq_bfqq_resume_state()
1057 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) in bfq_bfqq_resume_state()
1058 bfqd->wr_busy_queues--; in bfq_bfqq_resume_state()
1063 return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv - in bfqq_process_refs()
1064 (bfqq->weight_counter != NULL); in bfqq_process_refs()
1073 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) in bfq_reset_burst_list()
1074 hlist_del_init(&item->burst_list_node); in bfq_reset_burst_list()
1082 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_reset_burst_list()
1083 bfqd->burst_size = 1; in bfq_reset_burst_list()
1085 bfqd->burst_size = 0; in bfq_reset_burst_list()
1087 bfqd->burst_parent_entity = bfqq->entity.parent; in bfq_reset_burst_list()
1090 /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
1094 bfqd->burst_size++; in bfq_add_to_burst()
1096 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { in bfq_add_to_burst()
1104 bfqd->large_burst = true; in bfq_add_to_burst()
1110 hlist_for_each_entry(bfqq_item, &bfqd->burst_list, in bfq_add_to_burst()
1116 * From now on, and until the current burst finishes, any in bfq_add_to_burst()
1122 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, in bfq_add_to_burst()
1124 hlist_del_init(&pos->burst_list_node); in bfq_add_to_burst()
1131 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_add_to_burst()
1141 * possible, it is usually better to not grant either weight-raising
1153 * completed. As a consequence, weight-raising any of these queues,
1157 * weight-raising these new queues just lowers throughput in most
1162 * parallel I/O-bound threads. In fact, with a complex application,
1163 * several short processes may need to be executed to start-up the
1169 * weight-raise all the queues created during the burst. This is the
1179 * weight-raise queues whose creation occurs in a large burst. In
1206 * . if the current burst has not yet become large, and a queue Q that does
1212 * the large-burst threshold, then
1220 * previous sub-step), and now is not needed any more
1222 * . the device enters a large-burst mode
1225 * the device is in large-burst mode and shortly after the last time
1227 * belonging to the current large burst, then Q is immediately marked
1233 * current large burst, then the current burst is deemed as finished and:
1235 * . the large-burst mode is reset if set
1250 if (!hlist_unhashed(&bfqq->burst_list_node) || in bfq_handle_burst()
1252 time_is_after_eq_jiffies(bfqq->split_time + in bfq_handle_burst()
1258 * a different group than the burst group, then the current in bfq_handle_burst()
1273 if (time_is_before_jiffies(bfqd->last_ins_in_burst + in bfq_handle_burst()
1274 bfqd->bfq_burst_interval) || in bfq_handle_burst()
1275 bfqq->entity.parent != bfqd->burst_parent_entity) { in bfq_handle_burst()
1276 bfqd->large_burst = false; in bfq_handle_burst()
1283 * last queue. So, if the current burst is also large, we can mark in bfq_handle_burst()
1286 if (bfqd->large_burst) { in bfq_handle_burst()
1292 * If we get here, then a large-burst state has not yet been in bfq_handle_burst()
1299 * At this point, bfqq either has been added to the current in bfq_handle_burst()
1300 * burst or has caused the current burst to terminate and a in bfq_handle_burst()
1306 bfqd->last_ins_in_burst = jiffies; in bfq_handle_burst()
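/*
 * Condensed sketch of the bookkeeping above: queue creations that
 * arrive within a burst interval of the previous one grow the current
 * burst, and once the burst reaches a threshold it is flagged as large.
 * List handling, cgroup checks and the split_time exception are
 * deliberately left out; all names here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct burst_state {
	uint64_t last_ins_ns;	/* time of the last queue creation */
	unsigned int size;	/* queues in the current burst */
	bool large;		/* has the burst become large? */
};

static void note_queue_creation(struct burst_state *b, uint64_t now_ns,
				uint64_t burst_interval_ns,
				unsigned int large_burst_thresh)
{
	if (now_ns - b->last_ins_ns > burst_interval_ns) {
		/* too much time has passed: the previous burst is over */
		b->size = 1;
		b->large = false;
	} else if (++b->size >= large_burst_thresh) {
		b->large = true;
	}
	b->last_ins_ns = now_ns;
}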
1311 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_budget_left()
1313 return entity->budget - entity->service; in bfq_bfqq_budget_left()
1317 * If enough samples have been computed, return the current max budget
1323 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_max_budget()
1326 return bfqd->bfq_max_budget; in bfq_max_budget()
1330 * Return min budget, which is a fraction of the current or default
1335 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_min_budget()
1338 return bfqd->bfq_max_budget / 32; in bfq_min_budget()
1344 * whether the in-service queue should be expired, by returning
1345 * true. The purpose of expiring the in-service queue is to give bfqq
1346 * the chance to possibly preempt the in-service queue, and the reason
1347 * for preempting the in-service queue is to achieve one of the two
1354 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1358 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1359 * a new request before the expiration of the idling-time.
1388 * before last expiration. Thus timestamps need to be back-shifted
1392 * Secondly, to allow the process to recover the hole, the in-service
1395 * in-service queue to be completed, then it may become impossible to
1396 * let the process recover the hole, even if the back-shifted
1397 * timestamps of bfqq are lower than those of the in-service queue. If
1413 * above-described special way, and signals that the in-service queue
1414 * should be expired. Timestamp back-shifting is done later in
1420 * timestamp than the in-service queue. That is, the next budget of
1421 * bfqq may have to be completed before the one of the in-service
1422 * queue. If this is the case, then preempting the in-service queue
1428 * the in-service queue must be preempted. To have service trees
1429 * correctly updated, the in-service queue must be expired and
1432 * mechanism may be re-designed in such a way to make it possible to
1436 * even be no in-service queue when the next function is invoked (so,
1441 * in-service queue (unconditionally) only for queues that need to
1449 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_update_budg_for_activation()
1463 * that timestamps need to be back-shifted (and is in bfq_bfqq_update_budg_for_activation()
1469 * entity->service or entity->budget are not updated in bfq_bfqq_update_budg_for_activation()
1474 * entity->budget the remaining budget on such an in bfq_bfqq_update_budg_for_activation()
1477 entity->budget = min_t(unsigned long, in bfq_bfqq_update_budg_for_activation()
1479 bfqq->max_budget); in bfq_bfqq_update_budg_for_activation()
1482 * At this point, we have used entity->service to get in bfq_bfqq_update_budg_for_activation()
1484 * entity->budget). Thus we finally can, and have to, in bfq_bfqq_update_budg_for_activation()
1485 * reset entity->service. The latter must be reset in bfq_bfqq_update_budg_for_activation()
1490 entity->service = 0; in bfq_bfqq_update_budg_for_activation()
1498 entity->service = 0; in bfq_bfqq_update_budg_for_activation()
1499 entity->budget = max_t(unsigned long, bfqq->max_budget, in bfq_bfqq_update_budg_for_activation()
1500 bfq_serv_to_charge(bfqq->next_rq, bfqq)); in bfq_bfqq_update_budg_for_activation()
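/*
 * Sketch of the two budget choices above: a queue whose timestamps are
 * being back-shifted keeps (at most max_budget of) its remaining
 * budget, while a queue re-activated from scratch gets a budget large
 * enough for both its usual needs and its next request. Plain integers
 * replace the entity fields.
 */
#include <stdbool.h>

static unsigned long budget_on_activation(bool backshifted,
					   unsigned long remaining_budget,
					   unsigned long max_budget,
					   unsigned long next_rq_charge)
{
	if (backshifted)
		return remaining_budget < max_budget ?
			remaining_budget : max_budget;

	return max_budget > next_rq_charge ? max_budget : next_rq_charge;
}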
1511 return jiffies - MAX_JIFFY_OFFSET; in bfq_smallest_from_now()
1523 /* start a weight-raising period */ in bfq_update_bfqq_wr_on_rq_arrival()
1525 bfqq->service_from_wr = 0; in bfq_update_bfqq_wr_on_rq_arrival()
1526 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1527 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1533 * that, at the end of the soft-real-time in bfq_update_bfqq_wr_on_rq_arrival()
1535 * now, no interactive weight-raising period in bfq_update_bfqq_wr_on_rq_arrival()
1540 bfqq->wr_start_at_switch_to_srt = in bfq_update_bfqq_wr_on_rq_arrival()
1542 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1544 bfqq->wr_cur_max_time = in bfq_update_bfqq_wr_on_rq_arrival()
1545 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1551 * scheduling-error component due to a too large in bfq_update_bfqq_wr_on_rq_arrival()
1557 bfqq->entity.budget = min_t(unsigned long, in bfq_update_bfqq_wr_on_rq_arrival()
1558 bfqq->entity.budget, in bfq_update_bfqq_wr_on_rq_arrival()
1562 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1563 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1565 bfqq->wr_coeff = 1; in bfq_update_bfqq_wr_on_rq_arrival()
1571 * the weight-raising duration for the in bfq_update_bfqq_wr_on_rq_arrival()
1572 * application with the weight-raising in bfq_update_bfqq_wr_on_rq_arrival()
1576 * before the weight-raising period for the in bfq_update_bfqq_wr_on_rq_arrival()
1583 * at a certain time weight-raising is in bfq_update_bfqq_wr_on_rq_arrival()
1594 * weight-raised while they are pending. in bfq_update_bfqq_wr_on_rq_arrival()
1596 if (bfqq->wr_cur_max_time != in bfq_update_bfqq_wr_on_rq_arrival()
1597 bfqd->bfq_wr_rt_max_time) { in bfq_update_bfqq_wr_on_rq_arrival()
1598 bfqq->wr_start_at_switch_to_srt = in bfq_update_bfqq_wr_on_rq_arrival()
1599 bfqq->last_wr_start_finish; in bfq_update_bfqq_wr_on_rq_arrival()
1601 bfqq->wr_cur_max_time = in bfq_update_bfqq_wr_on_rq_arrival()
1602 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1603 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1606 bfqq->last_wr_start_finish = jiffies; in bfq_update_bfqq_wr_on_rq_arrival()
1614 return bfqq->dispatched == 0 && in bfq_bfqq_idle_for_long_time()
1616 bfqq->budget_timeout + in bfq_bfqq_idle_for_long_time()
1617 bfqd->bfq_wr_min_idle_time); in bfq_bfqq_idle_for_long_time()
1623 * weight than the in-service queue.
1630 if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class) in bfq_bfqq_higher_class_or_weight()
1633 if (in_serv_bfqq->entity.parent == bfqq->entity.parent) { in bfq_bfqq_higher_class_or_weight()
1634 bfqq_weight = bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1635 in_serv_weight = in_serv_bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1637 if (bfqq->entity.parent) in bfq_bfqq_higher_class_or_weight()
1638 bfqq_weight = bfqq->entity.parent->weight; in bfq_bfqq_higher_class_or_weight()
1640 bfqq_weight = bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1641 if (in_serv_bfqq->entity.parent) in bfq_bfqq_higher_class_or_weight()
1642 in_serv_weight = in_serv_bfqq->entity.parent->weight; in bfq_bfqq_higher_class_or_weight()
1644 in_serv_weight = in_serv_bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
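/*
 * Sketch of the comparison above: a numerically lower I/O-priority
 * class wins outright; otherwise weights are compared directly when the
 * two queues share a parent, and through the parent group's weight when
 * they do not. The struct below is a stand-in, not bfq_queue, and the
 * logic only mirrors the branches visible in this excerpt.
 */
#include <stdbool.h>

struct toy_queue {
	int ioprio_class;
	int weight;
	int parent_weight;	/* ignored when has_parent == false */
	bool has_parent;
};

static bool higher_class_or_weight(const struct toy_queue *q,
				   const struct toy_queue *in_serv,
				   bool same_parent)
{
	int qw, sw;

	if (q->ioprio_class < in_serv->ioprio_class)
		return true;

	if (same_parent) {
		qw = q->weight;
		sw = in_serv->weight;
	} else {
		qw = q->has_parent ? q->parent_weight : q->weight;
		sw = in_serv->has_parent ? in_serv->parent_weight :
					   in_serv->weight;
	}
	return qw > sw;
}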
1665 bfqq->ttime.last_end_request + in bfq_bfqq_handle_idle_busy_switch()
1666 bfqd->bfq_slice_idle * 3; in bfq_bfqq_handle_idle_busy_switch()
1670 * bfqq deserves to be weight-raised if: in bfq_bfqq_handle_idle_busy_switch()
1671 * - it is sync, in bfq_bfqq_handle_idle_busy_switch()
1672 * - it does not belong to a large burst, in bfq_bfqq_handle_idle_busy_switch()
1673 * - it has been idle for enough time or is soft real-time, in bfq_bfqq_handle_idle_busy_switch()
1674 * - is linked to a bfq_io_cq (it is not shared in any sense). in bfq_bfqq_handle_idle_busy_switch()
1677 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_handle_idle_busy_switch()
1680 time_is_before_jiffies(bfqq->soft_rt_next_start) && in bfq_bfqq_handle_idle_busy_switch()
1681 bfqq->dispatched == 0; in bfq_bfqq_handle_idle_busy_switch()
1683 wr_or_deserves_wr = bfqd->low_latency && in bfq_bfqq_handle_idle_busy_switch()
1684 (bfqq->wr_coeff > 1 || in bfq_bfqq_handle_idle_busy_switch()
1686 bfqq->bic && (*interactive || soft_rt))); in bfq_bfqq_handle_idle_busy_switch()
1690 * may want to preempt the in-service queue. in bfq_bfqq_handle_idle_busy_switch()
1712 bfqq->budget_timeout + in bfq_bfqq_handle_idle_busy_switch()
1714 hlist_del_init(&bfqq->burst_list_node); in bfq_bfqq_handle_idle_busy_switch()
1723 bfqq->requests_within_timer++; in bfq_bfqq_handle_idle_busy_switch()
1724 if (bfqq->requests_within_timer >= in bfq_bfqq_handle_idle_busy_switch()
1725 bfqd->bfq_requests_within_timer) in bfq_bfqq_handle_idle_busy_switch()
1728 bfqq->requests_within_timer = 0; in bfq_bfqq_handle_idle_busy_switch()
1731 if (bfqd->low_latency) { in bfq_bfqq_handle_idle_busy_switch()
1732 if (unlikely(time_is_after_jiffies(bfqq->split_time))) in bfq_bfqq_handle_idle_busy_switch()
1734 bfqq->split_time = in bfq_bfqq_handle_idle_busy_switch()
1735 jiffies - bfqd->bfq_wr_min_idle_time - 1; in bfq_bfqq_handle_idle_busy_switch()
1737 if (time_is_before_jiffies(bfqq->split_time + in bfq_bfqq_handle_idle_busy_switch()
1738 bfqd->bfq_wr_min_idle_time)) { in bfq_bfqq_handle_idle_busy_switch()
1746 if (old_wr_coeff != bfqq->wr_coeff) in bfq_bfqq_handle_idle_busy_switch()
1747 bfqq->entity.prio_changed = 1; in bfq_bfqq_handle_idle_busy_switch()
1751 bfqq->last_idle_bklogged = jiffies; in bfq_bfqq_handle_idle_busy_switch()
1752 bfqq->service_from_backlogged = 0; in bfq_bfqq_handle_idle_busy_switch()
1758 * Expire in-service queue only if preemption may be needed in bfq_bfqq_handle_idle_busy_switch()
1764 * carry time-critical I/O, then bfqq's bandwidth is less in bfq_bfqq_handle_idle_busy_switch()
1765 * important than that of queues that carry time-critical I/O. in bfq_bfqq_handle_idle_busy_switch()
1767 * bfqq is at least as weight-raised, i.e., at least as time in bfq_bfqq_handle_idle_busy_switch()
1768 * critical, as the in-service queue. in bfq_bfqq_handle_idle_busy_switch()
1771 * or has a higher weight than the in-service queue. If this in bfq_bfqq_handle_idle_busy_switch()
1778 * the timestamps of both bfqq and of the in-service queue, in bfq_bfqq_handle_idle_busy_switch()
1785 * timestamps of the in-service queue would need to be in bfq_bfqq_handle_idle_busy_switch()
1789 if (bfqd->in_service_queue && in bfq_bfqq_handle_idle_busy_switch()
1791 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) || in bfq_bfqq_handle_idle_busy_switch()
1792 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) && in bfq_bfqq_handle_idle_busy_switch()
1794 bfq_bfqq_expire(bfqd, bfqd->in_service_queue, in bfq_bfqq_handle_idle_busy_switch()
1802 bfqq->last_serv_time_ns = 0; in bfq_reset_inject_limit()
1808 bfqd->waited_rq = NULL; in bfq_reset_inject_limit()
1812 * inject limit to 0 prudentially, because the service time of in bfq_reset_inject_limit()
1818 * adaptive update will however raise the limit soon. This in bfq_reset_inject_limit()
1821 * get new I/O enqueued---and then completed---before being in bfq_reset_inject_limit()
1823 * limit-update algorithm the chance to measure the effect of in bfq_reset_inject_limit()
1825 * limit accordingly. in bfq_reset_inject_limit()
1827 * However, in the following special case, the inject limit is in bfq_reset_inject_limit()
1831 * completed. Keeping the inject limit to 1 allows the in bfq_reset_inject_limit()
1846 * times and update the inject limit accordingly (see comments in bfq_reset_inject_limit()
1847 * on bfq_update_inject_limit()). So the limit is likely to be in bfq_reset_inject_limit()
1849 * setting the limit to 1, we avoid that no injection ever in bfq_reset_inject_limit()
1853 * limit-update algorithm and possibly raise the limit to more in bfq_reset_inject_limit()
1857 bfqq->inject_limit = 0; in bfq_reset_inject_limit()
1859 bfqq->inject_limit = 1; in bfq_reset_inject_limit()
1861 bfqq->decrease_time_jif = jiffies; in bfq_reset_inject_limit()
1867 struct bfq_data *bfqd = bfqq->bfqd; in bfq_add_request()
1869 unsigned int old_wr_coeff = bfqq->wr_coeff; in bfq_add_request()
1873 bfqq->queued[rq_is_sync(rq)]++; in bfq_add_request()
1874 bfqd->queued++; in bfq_add_request()
1876 if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) { in bfq_add_request()
1886 * A remarkable throughput boost can be reached by in bfq_add_request()
1910 * last filter, plus the above two-times requirement, in bfq_add_request()
1920 * time. This implies that bfqq's inject limit is at in bfq_add_request()
1924 * during the very first I/O-plugging time interval in bfq_add_request()
1929 * I/O-plugging interval for bfqq. in bfq_add_request()
1931 if (bfqd->last_completed_rq_bfqq && in bfq_add_request()
1933 ktime_get_ns() - bfqd->last_completion < in bfq_add_request()
1935 if (bfqd->last_completed_rq_bfqq != bfqq && in bfq_add_request()
1936 bfqd->last_completed_rq_bfqq != in bfq_add_request()
1937 bfqq->waker_bfqq) { in bfq_add_request()
1942 * from the current one. in bfq_add_request()
1944 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq; in bfq_add_request()
1948 * bfqq->waker_bfqq must be reset. To in bfq_add_request()
1966 if (!hlist_unhashed(&bfqq->woken_list_node)) in bfq_add_request()
1967 hlist_del_init(&bfqq->woken_list_node); in bfq_add_request()
1968 hlist_add_head(&bfqq->woken_list_node, in bfq_add_request()
1969 &bfqd->last_completed_rq_bfqq->woken_list); in bfq_add_request()
1972 } else if (bfqd->last_completed_rq_bfqq == in bfq_add_request()
1973 bfqq->waker_bfqq && in bfq_add_request()
1984 * Periodically reset inject limit, to make sure that in bfq_add_request()
1989 if (time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_add_request()
1996 * update of the inject limit: in bfq_add_request()
1997 * - bfqq is in service, because the total service in bfq_add_request()
2000 * - this is the right occasion to compute or to in bfq_add_request()
2006 * quantity needed to update the inject limit, i.e., in bfq_add_request()
2008 * injection allowed by the current value of the in bfq_add_request()
2009 * limit. It is the right occasion because injection in bfq_add_request()
2011 * hole, and there are still in-flight requests, in bfq_add_request()
2014 * - the minimum interval for sampling the total in bfq_add_request()
2015 * service time and updating the inject limit has in bfq_add_request()
2018 if (bfqq == bfqd->in_service_queue && in bfq_add_request()
2019 (bfqd->rq_in_driver == 0 || in bfq_add_request()
2020 (bfqq->last_serv_time_ns > 0 && in bfq_add_request()
2021 bfqd->rqs_injected && bfqd->rq_in_driver > 0)) && in bfq_add_request()
2022 time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_add_request()
2024 bfqd->last_empty_occupied_ns = ktime_get_ns(); in bfq_add_request()
2028 * wait_dispatch will cause bfqd->waited_rq to in bfq_add_request()
2031 bfqd->wait_dispatch = true; in bfq_add_request()
2036 * service time of rq. So the injection limit in bfq_add_request()
2040 * injection limit updated only in the latter in bfq_add_request()
2045 if (bfqd->rq_in_driver == 0) in bfq_add_request()
2046 bfqd->rqs_injected = false; in bfq_add_request()
2050 elv_rb_add(&bfqq->sort_list, rq); in bfq_add_request()
2053 * Check if this request is a better next-serve candidate. in bfq_add_request()
2055 prev = bfqq->next_rq; in bfq_add_request()
2056 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); in bfq_add_request()
2057 bfqq->next_rq = next_rq; in bfq_add_request()
2063 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq)) in bfq_add_request()
2070 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && in bfq_add_request()
2072 bfqq->last_wr_start_finish + in bfq_add_request()
2073 bfqd->bfq_wr_min_inter_arr_async)) { in bfq_add_request()
2074 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_add_request()
2075 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_add_request()
2077 bfqd->wr_busy_queues++; in bfq_add_request()
2078 bfqq->entity.prio_changed = 1; in bfq_add_request()
2080 if (prev != bfqq->next_rq) in bfq_add_request()
2088 * . if bfqq is not going to be weight-raised, because, for in bfq_add_request()
2089 * non weight-raised queues, last_wr_start_finish stores the in bfq_add_request()
2092 * weight-raise async queues in bfq_add_request()
2094 * . if bfqq is not weight-raised, because, if bfqq is now in bfq_add_request()
2095 * switching to weight-raised, then last_wr_start_finish in bfq_add_request()
2096 * stores the time when weight-raising starts in bfq_add_request()
2099 * bfqq is currently weight-raised, the weight-raising in bfq_add_request()
2102 * conditions, if bfqq is already weight-raised) in bfq_add_request()
2105 * real-time, because the weight-raising period is constantly in bfq_add_request()
2106 * restarted on idle-to-busy transitions for these queues, but in bfq_add_request()
2110 if (bfqd->low_latency && in bfq_add_request()
2111 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) in bfq_add_request()
2112 bfqq->last_wr_start_finish = jiffies; in bfq_add_request()
2119 struct bfq_queue *bfqq = bfqd->bio_bfqq; in bfq_find_rq_fmerge()
2123 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); in bfq_find_rq_fmerge()
2131 return abs(blk_rq_pos(rq) - last_pos); in get_sdist()
2139 struct bfq_data *bfqd = q->elevator->elevator_data;
2141 bfqd->rq_in_driver++;
2146 struct bfq_data *bfqd = q->elevator->elevator_data;
2148 bfqd->rq_in_driver--;
2156 struct bfq_data *bfqd = bfqq->bfqd; in bfq_remove_request()
2159 if (bfqq->next_rq == rq) { in bfq_remove_request()
2160 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); in bfq_remove_request()
2164 if (rq->queuelist.prev != &rq->queuelist) in bfq_remove_request()
2165 list_del_init(&rq->queuelist); in bfq_remove_request()
2166 bfqq->queued[sync]--; in bfq_remove_request()
2167 bfqd->queued--; in bfq_remove_request()
2168 elv_rb_del(&bfqq->sort_list, rq); in bfq_remove_request()
2171 if (q->last_merge == rq) in bfq_remove_request()
2172 q->last_merge = NULL; in bfq_remove_request()
2174 if (RB_EMPTY_ROOT(&bfqq->sort_list)) { in bfq_remove_request()
2175 bfqq->next_rq = NULL; in bfq_remove_request()
2177 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { in bfq_remove_request()
2181 * bfqq is empty, bfqq->entity.service and in bfq_remove_request()
2182 * bfqq->entity.budget must contain, in bfq_remove_request()
2188 * reset both bfqq->entity.service and in bfq_remove_request()
2189 * bfqq->entity.budget, if bfqq has still a in bfq_remove_request()
2192 bfqq->entity.budget = bfqq->entity.service = 0; in bfq_remove_request()
2196 * Remove queue from request-position tree as it is empty. in bfq_remove_request()
2198 if (bfqq->pos_root) { in bfq_remove_request()
2199 rb_erase(&bfqq->pos_node, bfqq->pos_root); in bfq_remove_request()
2200 bfqq->pos_root = NULL; in bfq_remove_request()
2204 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_remove_request()
2208 if (rq->cmd_flags & REQ_META) in bfq_remove_request()
2209 bfqq->meta_pending--; in bfq_remove_request()
2216 struct request_queue *q = hctx->queue; in bfq_bio_merge()
2217 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_bio_merge()
2222 * queue_lock inside the bfqd->lock. We assume that the bic in bfq_bio_merge()
2224 * bfqd->lock is taken. in bfq_bio_merge()
2226 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); in bfq_bio_merge()
2229 spin_lock_irq(&bfqd->lock); in bfq_bio_merge()
2232 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); in bfq_bio_merge()
2234 bfqd->bio_bfqq = NULL; in bfq_bio_merge()
2235 bfqd->bio_bic = bic; in bfq_bio_merge()
2241 spin_unlock_irq(&bfqd->lock); in bfq_bio_merge()
2249 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_request_merge()
2267 rb_prev(&req->rb_node) && in bfq_request_merged()
2269 blk_rq_pos(container_of(rb_prev(&req->rb_node), in bfq_request_merged()
2278 bfqd = bfqq->bfqd; in bfq_request_merged()
2281 elv_rb_del(&bfqq->sort_list, req); in bfq_request_merged()
2282 elv_rb_add(&bfqq->sort_list, req); in bfq_request_merged()
2285 prev = bfqq->next_rq; in bfq_request_merged()
2286 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, in bfq_request_merged()
2287 bfqd->last_position); in bfq_request_merged()
2288 bfqq->next_rq = next_rq; in bfq_request_merged()
2294 if (prev != bfqq->next_rq) { in bfq_request_merged()
2300 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_request_merged()
2314 * on that rq is picked from the hash table q->elevator->hash, which,
2339 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && in bfq_requests_merged()
2340 next->fifo_time < rq->fifo_time) { in bfq_requests_merged()
2341 list_del_init(&rq->queuelist); in bfq_requests_merged()
2342 list_replace_init(&next->queuelist, &rq->queuelist); in bfq_requests_merged()
2343 rq->fifo_time = next->fifo_time; in bfq_requests_merged()
2346 if (bfqq->next_rq == next) in bfq_requests_merged()
2347 bfqq->next_rq = rq; in bfq_requests_merged()
2349 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); in bfq_requests_merged()
2356 bfqq->bfqd->wr_busy_queues--; in bfq_bfqq_end_wr()
2357 bfqq->wr_coeff = 1; in bfq_bfqq_end_wr()
2358 bfqq->wr_cur_max_time = 0; in bfq_bfqq_end_wr()
2359 bfqq->last_wr_start_finish = jiffies; in bfq_bfqq_end_wr()
2364 bfqq->entity.prio_changed = 1; in bfq_bfqq_end_wr()
2374 if (bfqg->async_bfqq[i][j]) in bfq_end_wr_async_queues()
2375 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); in bfq_end_wr_async_queues()
2376 if (bfqg->async_idle_bfqq) in bfq_end_wr_async_queues()
2377 bfq_bfqq_end_wr(bfqg->async_idle_bfqq); in bfq_end_wr_async_queues()
2384 spin_lock_irq(&bfqd->lock); in bfq_end_wr()
2386 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) in bfq_end_wr()
2388 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) in bfq_end_wr()
2392 spin_unlock_irq(&bfqd->lock); in bfq_end_wr()
2400 return ((struct bio *)io_struct)->bi_iter.bi_sector; in bfq_io_struct_pos()
2406 return abs(bfq_io_struct_pos(io_struct, request) - sector) <= in bfq_rq_close_to_sector()
2414 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; in bfqq_find_close()
2435 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) in bfqq_find_close()
2438 if (blk_rq_pos(__bfqq->next_rq) < sector) in bfqq_find_close()
2439 node = rb_next(&__bfqq->pos_node); in bfqq_find_close()
2441 node = rb_prev(&__bfqq->pos_node); in bfqq_find_close()
2446 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) in bfqq_find_close()
2480 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain in bfq_setup_merge()
2488 while ((__bfqq = new_bfqq->new_bfqq)) { in bfq_setup_merge()
2503 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", in bfq_setup_merge()
2504 new_bfqq->pid); in bfq_setup_merge()
2518 * not available any more (new_bfqq->bic == NULL). in bfq_setup_merge()
2520 * Anyway, even in case new_bfqq coincides with the in-service in bfq_setup_merge()
2521 * queue, redirecting requests the in-service queue is the in bfq_setup_merge()
2522 * best option, as we feed the in-service queue with new in bfq_setup_merge()
2526 bfqq->new_bfqq = new_bfqq; in bfq_setup_merge()
2527 new_bfqq->ref += process_refs; in bfq_setup_merge()
2538 (bfqq->ioprio_class != new_bfqq->ioprio_class)) in bfq_may_be_close_cooperator()
2561 * Attempt to schedule a merge of bfqq with the currently in-service
2573 * WARNING: queue merging may impair fairness among non-weight-raised
2577 * requests than the ones produced by its originally-associated
2604 * non-merged queues. This may accentuate workload in bfq_setup_cooperator()
2623 if (likely(bfqd->nonrot_with_queueing)) in bfq_setup_cooperator()
2633 * probability that two non-cooperating processes, which just in bfq_setup_cooperator()
2640 if (bfqq->new_bfqq) in bfq_setup_cooperator()
2641 return bfqq->new_bfqq; in bfq_setup_cooperator()
2643 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_setup_cooperator()
2650 in_service_bfqq = bfqd->in_service_queue; in bfq_setup_cooperator()
2653 likely(in_service_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
2655 bfqd->in_serv_last_pos) && in bfq_setup_cooperator()
2656 bfqq->entity.parent == in_service_bfqq->entity.parent && in bfq_setup_cooperator()
2670 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
2679 struct bfq_io_cq *bic = bfqq->bic; in bfq_bfqq_save_state()
2682 * If !bfqq->bic, the queue is already shared or its requests in bfq_bfqq_save_state()
2689 bic->saved_weight = bfqq->entity.orig_weight; in bfq_bfqq_save_state()
2690 bic->saved_ttime = bfqq->ttime; in bfq_bfqq_save_state()
2691 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); in bfq_bfqq_save_state()
2692 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); in bfq_bfqq_save_state()
2693 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); in bfq_bfqq_save_state()
2694 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); in bfq_bfqq_save_state()
2697 bfqq->bfqd->low_latency)) { in bfq_bfqq_save_state()
2701 * did not make it to be set in a weight-raised state, in bfq_bfqq_save_state()
2703 * weight-raising state that would have been assigned in bfq_bfqq_save_state()
2707 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; in bfq_bfqq_save_state()
2708 bic->saved_wr_start_at_switch_to_srt = bfq_smallest_from_now(); in bfq_bfqq_save_state()
2709 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); in bfq_bfqq_save_state()
2710 bic->saved_last_wr_start_finish = jiffies; in bfq_bfqq_save_state()
2712 bic->saved_wr_coeff = bfqq->wr_coeff; in bfq_bfqq_save_state()
2713 bic->saved_wr_start_at_switch_to_srt = in bfq_bfqq_save_state()
2714 bfqq->wr_start_at_switch_to_srt; in bfq_bfqq_save_state()
2715 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; in bfq_bfqq_save_state()
2716 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; in bfq_bfqq_save_state()
2733 if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_release_process_ref()
2734 bfqq != bfqd->in_service_queue) in bfq_release_process_ref()
2745 (unsigned long)new_bfqq->pid); in bfq_merge_bfqqs()
2754 * If bfqq is weight-raised, then let new_bfqq inherit in bfq_merge_bfqqs()
2755 * weight-raising. To reduce false positives, neglect the case in bfq_merge_bfqqs()
2757 * to be weight-raised (which may happen because EQM may merge in bfq_merge_bfqqs()
2762 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { in bfq_merge_bfqqs()
2763 new_bfqq->wr_coeff = bfqq->wr_coeff; in bfq_merge_bfqqs()
2764 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; in bfq_merge_bfqqs()
2765 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; in bfq_merge_bfqqs()
2766 new_bfqq->wr_start_at_switch_to_srt = in bfq_merge_bfqqs()
2767 bfqq->wr_start_at_switch_to_srt; in bfq_merge_bfqqs()
2769 bfqd->wr_busy_queues++; in bfq_merge_bfqqs()
2770 new_bfqq->entity.prio_changed = 1; in bfq_merge_bfqqs()
2773 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ in bfq_merge_bfqqs()
2774 bfqq->wr_coeff = 1; in bfq_merge_bfqqs()
2775 bfqq->entity.prio_changed = 1; in bfq_merge_bfqqs()
2777 bfqd->wr_busy_queues--; in bfq_merge_bfqqs()
2781 bfqd->wr_busy_queues); in bfq_merge_bfqqs()
2790 * set new_bfqq->bic to NULL. bfqq either: in bfq_merge_bfqqs()
2791 * - does not belong to any bic any more, and hence bfqq->bic must in bfq_merge_bfqqs()
2793 * - is a queue whose owning bics have already been redirected to a in bfq_merge_bfqqs()
2795 * any bic soon and bfqq->bic is already NULL (therefore the next in bfq_merge_bfqqs()
2798 new_bfqq->bic = NULL; in bfq_merge_bfqqs()
2805 * We mark such a queue with a pid -1, and then print SHARED instead of in bfq_merge_bfqqs()
2808 new_bfqq->pid = -1; in bfq_merge_bfqqs()
2809 bfqq->bic = NULL; in bfq_merge_bfqqs()
2816 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_allow_bio_merge()
2817 bool is_sync = op_is_sync(bio->bi_opf); in bfq_allow_bio_merge()
2818 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; in bfq_allow_bio_merge()
2846 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, in bfq_allow_bio_merge()
2856 * Change also bfqd->bio_bfqq, as in bfq_allow_bio_merge()
2857 * bfqd->bio_bic now points to new_bfqq, and in bfq_allow_bio_merge()
2859 * use again bfqd->bio_bfqq). in bfq_allow_bio_merge()
2861 bfqd->bio_bfqq = bfqq; in bfq_allow_bio_merge()
2868 * Set the maximum time for the in-service queue to consume its
2870 * In practice, a time-slice service scheme is used with seeky
2878 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) in bfq_set_budget_timeout()
2881 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; in bfq_set_budget_timeout()
2883 bfqd->last_budget_start = ktime_get(); in bfq_set_budget_timeout()
2885 bfqq->budget_timeout = jiffies + in bfq_set_budget_timeout()
2886 bfqd->bfq_timeout * timeout_coeff; in bfq_set_budget_timeout()
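/*
 * Sketch of the computation above: the budget timeout is the base
 * timeout stretched by the ratio between the current (possibly raised)
 * weight and the original weight, except during soft real-time weight
 * raising, where the coefficient stays at 1. Jiffies are reduced to a
 * plain "now" value.
 */
#include <stdbool.h>

static unsigned long budget_timeout(unsigned long now,
				    unsigned long base_timeout,
				    unsigned int weight,
				    unsigned int orig_weight,
				    bool soft_rt_weight_raised)
{
	unsigned int timeout_coeff =
		soft_rt_weight_raised ? 1 : weight / orig_weight;

	return now + base_timeout * timeout_coeff;
}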
2895 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; in __bfq_set_in_service_queue()
2897 if (time_is_before_jiffies(bfqq->last_wr_start_finish) && in __bfq_set_in_service_queue()
2898 bfqq->wr_coeff > 1 && in __bfq_set_in_service_queue()
2899 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in __bfq_set_in_service_queue()
2900 time_is_before_jiffies(bfqq->budget_timeout)) { in __bfq_set_in_service_queue()
2902 * For soft real-time queues, move the start in __bfq_set_in_service_queue()
2903 * of the weight-raising period forward by the in __bfq_set_in_service_queue()
2907 * weight-raising period of the queue to end, in __bfq_set_in_service_queue()
2909 * weight-raising period of a soft real-time in __bfq_set_in_service_queue()
2912 * because soft real-time queues are not in __bfq_set_in_service_queue()
2925 if (time_after(bfqq->budget_timeout, in __bfq_set_in_service_queue()
2926 bfqq->last_wr_start_finish)) in __bfq_set_in_service_queue()
2927 bfqq->last_wr_start_finish += in __bfq_set_in_service_queue()
2928 jiffies - bfqq->budget_timeout; in __bfq_set_in_service_queue()
2930 bfqq->last_wr_start_finish = jiffies; in __bfq_set_in_service_queue()
2935 "set_in_service_queue, cur-budget = %d", in __bfq_set_in_service_queue()
2936 bfqq->entity.budget); in __bfq_set_in_service_queue()
2939 bfqd->in_service_queue = bfqq; in __bfq_set_in_service_queue()
2955 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_arm_slice_timer()
2962 * fair distribution of slice time for a process doing back-to-back in bfq_arm_slice_timer()
2965 sl = bfqd->bfq_slice_idle; in bfq_arm_slice_timer()
2967 * Unless the queue is being weight-raised or the scenario is in bfq_arm_slice_timer()
2969 * is seeky. A long idling is preserved for a weight-raised in bfq_arm_slice_timer()
2976 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && in bfq_arm_slice_timer()
2979 else if (bfqq->wr_coeff > 1) in bfq_arm_slice_timer()
2982 bfqd->last_idling_start = ktime_get(); in bfq_arm_slice_timer()
2983 bfqd->last_idling_start_jiffies = jiffies; in bfq_arm_slice_timer()
2985 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), in bfq_arm_slice_timer()
2994 * budget, even if the in-service queue is served at peak rate. And
2999 return (u64)bfqd->peak_rate * USEC_PER_MSEC * in bfq_calc_max_budget()
3000 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; in bfq_calc_max_budget()
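/*
 * Sketch of the estimate above: the maximum budget is the number of
 * sectors the device can transfer, at the estimated peak rate, within
 * one budget timeout. peak_rate uses the fixed-point sectors/usec
 * format sketched earlier; the timeout is taken in milliseconds.
 */
#include <stdint.h>

#define RATE_SHIFT 16	/* same convention as in the earlier sketch */

static uint64_t calc_max_budget(uint32_t peak_rate_fixed, uint64_t timeout_ms)
{
	/* sectors = rate[sectors/usec] * timeout[usec], undoing the shift */
	return ((uint64_t)peak_rate_fixed * timeout_ms * 1000) >> RATE_SHIFT;
}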
3010 if (bfqd->bfq_user_max_budget == 0) { in update_thr_responsiveness_params()
3011 bfqd->bfq_max_budget = in update_thr_responsiveness_params()
3013 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); in update_thr_responsiveness_params()
3021 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); in bfq_reset_rate_computation()
3022 bfqd->peak_rate_samples = 1; in bfq_reset_rate_computation()
3023 bfqd->sequential_samples = 0; in bfq_reset_rate_computation()
3024 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = in bfq_reset_rate_computation()
3027 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ in bfq_reset_rate_computation()
3031 bfqd->peak_rate_samples, bfqd->sequential_samples, in bfq_reset_rate_computation()
3032 bfqd->tot_sectors_dispatched); in bfq_reset_rate_computation()
3047 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || in bfq_update_rate_reset()
3048 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) in bfq_update_rate_reset()
3057 bfqd->delta_from_first = in bfq_update_rate_reset()
3058 max_t(u64, bfqd->delta_from_first, in bfq_update_rate_reset()
3059 bfqd->last_completion - bfqd->first_dispatch); in bfq_update_rate_reset()
3065 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, in bfq_update_rate_reset()
3066 div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); in bfq_update_rate_reset()
3070 * - the percentage of sequential dispatches is below 3/4 of the in bfq_update_rate_reset()
3071 * total, and rate is below the current estimated peak rate in bfq_update_rate_reset()
3072 * - rate is unreasonably high (> 20M sectors/sec) in bfq_update_rate_reset()
3074 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && in bfq_update_rate_reset()
3075 rate <= bfqd->peak_rate) || in bfq_update_rate_reset()
3081 * we use a low-pass filter. We compute the smoothing constant in bfq_update_rate_reset()
3097 * cannot reach 9, because bfqd->sequential_samples cannot in bfq_update_rate_reset()
3098 * become equal to bfqd->peak_rate_samples, which, in its in bfq_update_rate_reset()
3099 * turn, holds true because bfqd->sequential_samples is not in bfq_update_rate_reset()
3102 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; in bfq_update_rate_reset()
3109 div_u64(weight * bfqd->delta_from_first, in bfq_update_rate_reset()
3116 divisor = 10 - weight; in bfq_update_rate_reset()
3121 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor in bfq_update_rate_reset()
3123 bfqd->peak_rate *= divisor-1; in bfq_update_rate_reset()
3124 bfqd->peak_rate /= divisor; in bfq_update_rate_reset()
3127 bfqd->peak_rate += rate; in bfq_update_rate_reset()
3130 * For a very slow device, bfqd->peak_rate can reach 0 (see in bfq_update_rate_reset()
3133 * divisions by zero where bfqd->peak_rate is used as a in bfq_update_rate_reset()
3136 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); in bfq_update_rate_reset()
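/*
 * Userspace sketch of the low-pass filter above: the fraction of
 * sequential samples picks a weight in [0, 8], the divisor becomes
 * 10 - weight, and the new observation is blended into the running
 * estimate as peak_rate * (divisor-1)/divisor + rate/divisor. The
 * extra scaling by the length of the observation interval done by the
 * real code is omitted here.
 */
#include <stdint.h>

static uint32_t blend_peak_rate(uint32_t peak_rate, uint32_t rate,
				unsigned int sequential_samples,
				unsigned int total_samples)
{
	unsigned int weight = (9 * sequential_samples) / total_samples;
	unsigned int divisor = 10 - weight;	/* between 2 and 10 */
	uint64_t blended;

	blended = (uint64_t)peak_rate * (divisor - 1) / divisor +
		  rate / divisor;

	/* never let the estimate collapse to 0, as in the code above */
	return blended > 1 ? (uint32_t)blended : 1;
}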
3146 * auto-tuning, see update_thr_responsiveness_params()).
3161 * unknown, namely in-device request service rate.
3180 if (bfqd->peak_rate_samples == 0) { /* first dispatch */ in bfq_update_peak_rate()
3182 bfqd->peak_rate_samples); in bfq_update_peak_rate()
3190 * for computing a new peak rate (similarly to the late- in bfq_update_peak_rate()
3194 * - close the observation interval at the last (previous) in bfq_update_peak_rate()
3196 * - compute rate, if possible, for that observation interval in bfq_update_peak_rate()
3197 * - start a new observation interval with this dispatch in bfq_update_peak_rate()
3199 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && in bfq_update_peak_rate()
3200 bfqd->rq_in_driver == 0) in bfq_update_peak_rate()
3204 bfqd->peak_rate_samples++; in bfq_update_peak_rate()
3206 if ((bfqd->rq_in_driver > 0 || in bfq_update_peak_rate()
3207 now_ns - bfqd->last_completion < BFQ_MIN_TT) in bfq_update_peak_rate()
3208 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) in bfq_update_peak_rate()
3209 bfqd->sequential_samples++; in bfq_update_peak_rate()
3211 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); in bfq_update_peak_rate()
3214 if (likely(bfqd->peak_rate_samples % 32)) in bfq_update_peak_rate()
3215 bfqd->last_rq_max_size = in bfq_update_peak_rate()
3216 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); in bfq_update_peak_rate()
3218 bfqd->last_rq_max_size = blk_rq_sectors(rq); in bfq_update_peak_rate()
3220 bfqd->delta_from_first = now_ns - bfqd->first_dispatch; in bfq_update_peak_rate()
3223 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) in bfq_update_peak_rate()
3229 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_update_peak_rate()
3230 if (RQ_BFQQ(rq) == bfqd->in_service_queue) in bfq_update_peak_rate()
3231 bfqd->in_serv_last_pos = bfqd->last_position; in bfq_update_peak_rate()
3232 bfqd->last_dispatch = now_ns; in bfq_update_peak_rate()
3248 * dispatch occur for a non in-service bfqq, this anticipated in bfq_dispatch_remove()
3249 * increment prevents two counters related to bfqq->dispatched in bfq_dispatch_remove()
3251 * incremented again when the (new) value of bfqq->dispatched in bfq_dispatch_remove()
3254 bfqq->dispatched++; in bfq_dispatch_remove()
3255 bfq_update_peak_rate(q->elevator->elevator_data, rq); in bfq_dispatch_remove()
3273 * the service order of the internally-queued requests, does
3276 * concern about per-process throughput distribution, and
3277 * makes its decisions only on a per-request basis. Therefore,
3282 * (i-a) each of these processes must get the same throughput as
3284 * (i-b) in case (i-a) does not hold, it holds that the process
3290 * (from I/O-bound to sporadic), and so on;
3296 * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
3301 * idling (I/O-dispatch plugging) is certainly needed to guarantee
3309 * throughput, it is important to check conditions (i-a), (i-b) and
3315 * very difficult to check conditions (i-a) and (i-b) too. In fact,
3316 * if there are active groups, then, for conditions (i-a) or (i-b) to
3318 * contains more active processes or sub-groups than some other active
3319 * group. More precisely, for conditions (i-a) or (i-b) to become
3327 * inactive while still having in-flight requests, and if, when this
3333 * bi-modal behavior, implemented in the function
3340 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
3346 * for completion, then only conditions (i-a) and (i-b) are actually
3347 * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
3349 * holds. In other words, only if conditions (i-a) and (i-b) do not
3353 * control conditions (i-a) and (i-b) it is enough to check just
3366 * can still preempt the new in-service queue if the next
3370 * combined with the hole-recovery heuristic described in the
3377 * minimum of mid-term fairness.
3379 * More precisely, this preemption-based, idleless approach
3403 * We are now left only with explaining the two sub-conditions in the
3406 * sub-condition, we need to add that the function
3408 * non-weight-raised queues, for efficiency reasons (see comments on
3409 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
3412 * weight-raised, the scenario is still symmetric if all queues with
3414 * weight-raised. Actually, we should be even more precise here, and
3415 * differentiate between interactive weight raising and soft real-time
3418 * The second sub-condition checked in the compound condition is
3419 * whether there is a fair amount of already in-flight I/O not
3421 * following reason. The drive may decide to serve in-flight
3422 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
3424 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
3432 * in-flight I/O, and enables bfqq to recover the bandwidth it may
3436 * device-idling countermeasures may however fail in the following
3437 * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
3438 * in a time period during which all symmetry sub-conditions hold, and
3440 * some later point in time some sub-condition ceases to hold, then it
3443 * served. The last sub-condition commented above somewhat mitigates
3444 * this problem for weight-raised queues.
3453 return (bfqq->wr_coeff > 1 && in idling_needed_for_service_guarantees()
3454 (bfqd->wr_busy_queues < in idling_needed_for_service_guarantees()
3456 bfqd->rq_in_driver >= in idling_needed_for_service_guarantees()
3457 bfqq->dispatched + 4)) || in idling_needed_for_service_guarantees()
3481 * not re-scheduled. To prevent this from happening, re-queue in __bfq_bfqq_expire()
3482 * bfqq if it needs I/O-dispatch plugging, even if it is in __bfq_bfqq_expire()
3486 if (RB_EMPTY_ROOT(&bfqq->sort_list) && in __bfq_bfqq_expire()
3489 if (bfqq->dispatched == 0) in __bfq_bfqq_expire()
3494 * the weight-raising mechanism. in __bfq_bfqq_expire()
3496 bfqq->budget_timeout = jiffies; in __bfq_bfqq_expire()
3505 if (unlikely(!bfqd->nonrot_with_queueing && in __bfq_bfqq_expire()
3506 !RB_EMPTY_ROOT(&bfqq->sort_list))) in __bfq_bfqq_expire()
3511 * All in-service entities must have been properly deactivated in __bfq_bfqq_expire()
3513 * resets all in-service entities as no more in service. This in __bfq_bfqq_expire()
3521 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
3538 if (bfqq->wr_coeff == 1) in __bfq_bfqq_recalc_budget()
3539 budget = bfqq->max_budget; in __bfq_bfqq_recalc_budget()
3541 * Use a constant, low budget for weight-raised queues, in __bfq_bfqq_recalc_budget()
3549 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); in __bfq_bfqq_recalc_budget()
3553 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); in __bfq_bfqq_recalc_budget()
3555 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { in __bfq_bfqq_recalc_budget()
3582 * contrary we increase it to possibly boost in __bfq_bfqq_recalc_budget()
3586 if (bfqq->dispatched > 0) /* still outstanding reqs */ in __bfq_bfqq_recalc_budget()
3587 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
3590 budget -= 4 * min_budget; in __bfq_bfqq_recalc_budget()
3598 * the chance to boost the throughput if this in __bfq_bfqq_recalc_budget()
3602 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
3612 * candidate to boost the disk throughput. in __bfq_bfqq_recalc_budget()
3614 budget = min(budget * 4, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
3628 * back-shifting. The larger the budget of the in __bfq_bfqq_recalc_budget()
3637 * many re-activations a lower finish time in __bfq_bfqq_recalc_budget()
3641 * quite precisely by bfqq->entity.service. in __bfq_bfqq_recalc_budget()
3643 * bfqq->entity.service is equal to the number in __bfq_bfqq_recalc_budget()
3649 budget = max_t(int, bfqq->entity.service, min_budget); in __bfq_bfqq_recalc_budget()
3661 budget = bfqd->bfq_max_budget; in __bfq_bfqq_recalc_budget()
3664 bfqq->max_budget = budget; in __bfq_bfqq_recalc_budget()
3666 if (bfqd->budgets_assigned >= bfq_stats_min_budgets && in __bfq_bfqq_recalc_budget()
3667 !bfqd->bfq_user_max_budget) in __bfq_bfqq_recalc_budget()
3668 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
3680 next_rq = bfqq->next_rq; in __bfq_bfqq_recalc_budget()
3682 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, in __bfq_bfqq_recalc_budget()
3687 bfqq->entity.budget); in __bfq_bfqq_recalc_budget()
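/*
 * A minimal userspace sketch (not kernel code) of the four budget
 * adjustments that appear in the branches above, isolated from the
 * conditions that select them: doubling or quadrupling clamped to the
 * global maximum budget, shrinking by four minimum budgets, or pinning
 * the budget to the service actually received. Which adjustment BFQ picks
 * depends on why the queue expired, on seekiness and on outstanding
 * requests; that selection logic is only partially visible here, and the
 * concrete numbers below are illustrative.
 */
#include <stdio.h>

static int clamp_max(int b, int max) { return b < max ? b : max; }

int main(void)
{
	int min_budget = 32, max_budget = 16384;	/* sectors, illustrative */
	int budget = 2048, service = 1500;

	printf("grow (x2, clamped): %d\n", clamp_max(budget * 2, max_budget));
	printf("grow (x4, clamped): %d\n", clamp_max(budget * 4, max_budget));
	printf("shrink (-4*min):    %d\n", budget - 4 * min_budget);
	printf("pin to service:     %d\n",
	       service > min_budget ? service : min_budget);
	return 0;
}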
3710 * service slots. On the opposite end, the requests of the in-service
3733 delta_ktime = bfqd->last_idling_start; in bfq_bfqq_is_slow()
3736 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); in bfq_bfqq_is_slow()
3741 if (blk_queue_nonrot(bfqd->queue)) in bfq_bfqq_is_slow()
3743 * give the same worst-case guarantees as idling in bfq_bfqq_is_slow()
3770 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; in bfq_bfqq_is_slow()
3779 * To be deemed as soft real-time, an application must meet two
3782 * record a compressed high-definition video.
3784 * batch, to compute the next-start time instant, soft_rt_next_start, such
3798 * Unfortunately, even a greedy (i.e., I/O-bound) application may
3805 * device: the storage device is highly loaded or reaches a low-enough
3810 * that greedy applications are deemed as soft real-time in these
3815 * (a) Current time plus: (1) the maximum time for which the arrival
3817 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
3819 * jiffies; we get back to it after next item (b). Lower-bounding
3820 * the return value of this function with the current time plus
3821 * bfqd->bfq_slice_idle tends to filter out greedy applications,
3824 * real-time application spends some time processing data, after a
3827 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
3830 * storage-device load. In more detail, in these scenarios, these
3833 * including the filtering in item (a) above. These slow-speed
3837 * I/O in the high-speed intervals, the values returned by this
3839 * high-speed interval, to be likely to fall *after* the end of
3840 * the low-speed time interval that follows. These high values are
3841 * stored in bfqq->soft_rt_next_start after each invocation of
3843 * bfqq->soft_rt_next_start is constantly used to lower-bound the
3845 * beginning of a low-speed interval, bfqq->soft_rt_next_start is
3847 * issued during the low-speed interval is considered as arriving
3849 * real-time. Then, in the high-speed interval that follows, the
3850 * application will not be deemed as soft real-time, just because
3856 * bfqd->bfq_slice_idle:
3858 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
3860 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
3866 * reference time interval just bfqd->bfq_slice_idle, but
3867 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
3874 return max3(bfqq->soft_rt_next_start, in bfq_bfqq_softrt_next_start()
3875 bfqq->last_idle_bklogged + in bfq_bfqq_softrt_next_start()
3876 HZ * bfqq->service_from_backlogged / in bfq_bfqq_softrt_next_start()
3877 bfqd->bfq_wr_max_softrt_rate, in bfq_bfqq_softrt_next_start()
3878 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); in bfq_bfqq_softrt_next_start()
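/*
 * A small userspace example (not kernel code) of the max3() above. With
 * the illustrative values below (HZ = 250, the 7000 sector/s soft-rt
 * reference rate, 2048 backlogged sectors), the bandwidth-based bound
 * lands 73 jiffies after the last idle-with-backlog instant and wins over
 * the other two candidates. All concrete numbers here are assumptions.
 */
#include <stdio.h>

#define HZ 250UL	/* illustrative */

static unsigned long max3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	unsigned long jiffies = 100000;
	unsigned long soft_rt_next_start = 100010;	/* previous bound */
	unsigned long last_idle_bklogged = 99990;
	unsigned long service_from_backlogged = 2048;	/* sectors */
	unsigned long wr_max_softrt_rate = 7000;	/* sectors/sec */
	unsigned long slice_idle_jiffies = 2;		/* ~8 ms at HZ=250 */

	unsigned long bw_bound = last_idle_bklogged +
		HZ * service_from_backlogged / wr_max_softrt_rate;
	unsigned long idle_bound = jiffies + slice_idle_jiffies + 4;

	printf("candidates: %lu %lu %lu -> next_start %lu\n",
	       soft_rt_next_start, bw_bound, idle_bound,
	       max3ul(soft_rt_next_start, bw_bound, idle_bound));
	return 0;
}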
3882 * bfq_bfqq_expire - expire a queue.
3897 * tends to lower the throughput). In addition, this time-charging
3914 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_expire()
3923 * timed-out queues with the time and not the service in bfq_bfqq_expire()
3934 * or quasi-sequential processes. in bfq_bfqq_expire()
3936 if (bfqq->wr_coeff == 1 && in bfq_bfqq_expire()
3939 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) in bfq_bfqq_expire()
3943 entity->service <= 2 * entity->budget / 10) in bfq_bfqq_expire()
3946 if (bfqd->low_latency && bfqq->wr_coeff == 1) in bfq_bfqq_expire()
3947 bfqq->last_wr_start_finish = jiffies; in bfq_bfqq_expire()
3949 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_expire()
3950 RB_EMPTY_ROOT(&bfqq->sort_list)) { in bfq_bfqq_expire()
3960 * real-time application. Such an application will in bfq_bfqq_expire()
3961 * actually exhibit a soft real-time I/O pattern after in bfq_bfqq_expire()
3977 if (bfqq->dispatched == 0 && in bfq_bfqq_expire()
3978 bfqq->wr_coeff != bfqd->bfq_wr_coeff) in bfq_bfqq_expire()
3979 bfqq->soft_rt_next_start = in bfq_bfqq_expire()
3981 else if (bfqq->dispatched > 0) { in bfq_bfqq_expire()
3992 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); in bfq_bfqq_expire()
3999 bfqd->rqs_injected = bfqd->wait_dispatch = false; in bfq_bfqq_expire()
4000 bfqd->waited_rq = NULL; in bfq_bfqq_expire()
4022 entity->service = 0; in bfq_bfqq_expire()
4025 * Reset the received-service counter for every parent entity. in bfq_bfqq_expire()
4026 * Differently from what happens with bfqq->entity.service, in bfq_bfqq_expire()
4030 * consumed budget, bfqq->entity.service needs to be kept, in bfq_bfqq_expire()
4032 * the same budget, the last value of bfqq->entity.service is in bfq_bfqq_expire()
4033 * needed to properly decrement bfqq->entity.budget by the in bfq_bfqq_expire()
4035 * to keep entity->service for parent entities too, because in bfq_bfqq_expire()
4036 * the bubble up of the new value of bfqq->entity.budget will in bfq_bfqq_expire()
4041 entity = entity->parent; in bfq_bfqq_expire()
4043 entity->service = 0; in bfq_bfqq_expire()
4053 return time_is_before_eq_jiffies(bfqq->budget_timeout); in bfq_bfqq_budget_timeout()
4066 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_may_expire_for_budg_timeout()
4069 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, in bfq_may_expire_for_budg_timeout()
4073 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) in bfq_may_expire_for_budg_timeout()
4082 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, in idling_boosts_thr_without_issues()
4099 * (a) the device is not NCQ-capable and rotational, or in idling_boosts_thr_without_issues()
4101 * the request pattern for bfqq is I/O-bound and sequential, or in idling_boosts_thr_without_issues()
4103 * not NCQ-capable and the request pattern for bfqq is in idling_boosts_thr_without_issues()
4104 * I/O-bound and sequential. in idling_boosts_thr_without_issues()
4107 * NCQ-capable flash-based device would not boost the in idling_boosts_thr_without_issues()
4112 * particular, happens to be false if bfqd is an NCQ-capable in idling_boosts_thr_without_issues()
4113 * flash-based device. in idling_boosts_thr_without_issues()
4116 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && in idling_boosts_thr_without_issues()
4123 * weight-raised queues. in idling_boosts_thr_without_issues()
4127 * non-weight-raised queues ask for requests at a lower rate, in idling_boosts_thr_without_issues()
4128 * then processes associated with weight-raised queues have a in idling_boosts_thr_without_issues()
4133 * weight. This is especially true with NCQ-capable drives, in idling_boosts_thr_without_issues()
4135 * reorder internally-queued requests. in idling_boosts_thr_without_issues()
4138 * there are weight-raised busy queues. In this case, and if in idling_boosts_thr_without_issues()
4139 * bfqq is not weight-raised, this guarantees that the device in idling_boosts_thr_without_issues()
4140 * is not idled for bfqq (if, instead, bfqq is weight-raised, in idling_boosts_thr_without_issues()
4144 * sync non-weight-raised queue, to get a lower number of in idling_boosts_thr_without_issues()
4147 * weight-raised queues get served again. This often mitigates in idling_boosts_thr_without_issues()
4154 bfqd->wr_busy_queues == 0; in idling_boosts_thr_without_issues()
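/*
 * A minimal sketch (not kernel code) of the shape of the decision above,
 * rewritten as a plain boolean function under assumptions: idling is
 * considered throughput-boosting when the device is rotational and does
 * not queue commands internally, or when the device is rotational or
 * non-queueing and the queue's I/O is sequential and I/O-bound; the
 * "without issues" variant additionally requires that no weight-raised
 * queue is busy. Variable names are illustrative.
 */
#include <stdbool.h>

static bool idling_boosts_thr_without_issues_sketch(bool nonrot, bool hw_tag,
						    bool seq_io_bound,
						    int wr_busy_queues)
{
	bool rot_without_queueing = !nonrot && !hw_tag;
	bool idling_boosts_thr = rot_without_queueing ||
		((!nonrot || !hw_tag) && seq_io_bound);

	return idling_boosts_thr && wr_busy_queues == 0;
}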
4168 * NCQ-capable devices, this function tries to return false, so as to
4170 * device boost the throughput without causing any service-guarantee
4180 struct bfq_data *bfqd = bfqq->bfqd; in bfq_better_to_idle()
4187 if (unlikely(bfqd->strict_guarantees)) in bfq_better_to_idle()
4196 * queues in this class can steal from higher-priority queues in bfq_better_to_idle()
4198 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || in bfq_better_to_idle()
4219 * If the in-service queue is empty but the function bfq_better_to_idle
4225 * why performing device idling is the best choice to boost the throughput
4231 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); in bfq_bfqq_must_idle()
4244 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue; in bfq_choose_bfqq_for_injection()
4245 unsigned int limit = in_serv_bfqq->inject_limit; in bfq_choose_bfqq_for_injection() local
4248 * - bfqq is not weight-raised and therefore does not carry in bfq_choose_bfqq_for_injection()
4249 * time-critical I/O, in bfq_choose_bfqq_for_injection()
4251 * - regardless of whether bfqq is weight-raised, bfqq has in bfq_choose_bfqq_for_injection()
4258 bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 || in bfq_choose_bfqq_for_injection()
4263 * - the baseline total service time could not be sampled yet, in bfq_choose_bfqq_for_injection()
4264 * so the inject limit happens to be still 0, and in bfq_choose_bfqq_for_injection()
4265 * - a lot of time has elapsed since the plugging of I/O in bfq_choose_bfqq_for_injection()
4268 * then temporarily raise inject limit to one request. in bfq_choose_bfqq_for_injection()
4270 if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 && in bfq_choose_bfqq_for_injection()
4272 time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies + in bfq_choose_bfqq_for_injection()
4273 bfqd->bfq_slice_idle) in bfq_choose_bfqq_for_injection()
4275 limit = 1; in bfq_choose_bfqq_for_injection()
4277 if (bfqd->rq_in_driver >= limit) in bfq_choose_bfqq_for_injection()
4285 * - BFQ dynamically updates the budget of every queue so as in bfq_choose_bfqq_for_injection()
4287 * - if a queue gets all its requests dispatched as injected in bfq_choose_bfqq_for_injection()
4289 * (and re-added only if it gets new requests, but then it in bfq_choose_bfqq_for_injection()
4292 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) in bfq_choose_bfqq_for_injection()
4293 if (!RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_choose_bfqq_for_injection()
4294 (in_serv_always_inject || bfqq->wr_coeff > 1) && in bfq_choose_bfqq_for_injection()
4295 bfq_serv_to_charge(bfqq->next_rq, bfqq) <= in bfq_choose_bfqq_for_injection()
4298 * Allow for only one large in-flight request in bfq_choose_bfqq_for_injection()
4299 * on non-rotational devices, for the in bfq_choose_bfqq_for_injection()
4300 * following reason. On non-rotational drives, in bfq_choose_bfqq_for_injection()
4307 * request of the in-service queue wait for so in bfq_choose_bfqq_for_injection()
4311 * there is only one in-flight large request in bfq_choose_bfqq_for_injection()
4314 if (blk_queue_nonrot(bfqd->queue) && in bfq_choose_bfqq_for_injection()
4315 blk_rq_sectors(bfqq->next_rq) >= in bfq_choose_bfqq_for_injection()
4317 limit = min_t(unsigned int, 1, limit); in bfq_choose_bfqq_for_injection()
4319 limit = in_serv_bfqq->inject_limit; in bfq_choose_bfqq_for_injection()
4321 if (bfqd->rq_in_driver < limit) { in bfq_choose_bfqq_for_injection()
4322 bfqd->rqs_injected = true; in bfq_choose_bfqq_for_injection()
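/*
 * A minimal sketch (not kernel code) of the inject-limit gating visible
 * above, isolated from the scan of the active queues: the limit is
 * temporarily raised to one request when it is still zero only because no
 * baseline service time has been sampled yet and the in-service queue's
 * I/O has been plugged long enough, it is clamped to at most one when the
 * candidate request is large and the device is non-rotational, and a
 * request is injected only while fewer requests than the limit are in the
 * driver. Names and the "large candidate" test are illustrative.
 */
#include <stdbool.h>

static bool may_inject(unsigned int inject_limit, unsigned int rq_in_driver,
		       bool baseline_unsampled, bool idled_long_enough,
		       bool nonrot, bool large_candidate)
{
	unsigned int limit = inject_limit;

	if (limit == 0 && baseline_unsampled && idled_long_enough)
		limit = 1;		/* probe with a single injected request */

	if (nonrot && large_candidate)
		limit = limit < 1 ? limit : 1;	/* at most one large rq in flight */

	return rq_in_driver < limit;
}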
4331 * Select a queue for service. If we have a current queue in service,
4340 bfqq = bfqd->in_service_queue; in bfq_select_queue()
4344 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); in bfq_select_queue()
4360 * happens, it is much more convenient to re-execute this loop in bfq_select_queue()
4364 next_rq = bfqq->next_rq; in bfq_select_queue()
4401 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_select_queue()
4408 * No requests pending. However, if the in-service queue is idling in bfq_select_queue()
4416 (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { in bfq_select_queue()
4418 bfqq->bic && bfqq->bic->bfqq[0] && in bfq_select_queue()
4419 bfq_bfqq_busy(bfqq->bic->bfqq[0]) && in bfq_select_queue()
4420 bfqq->bic->bfqq[0]->next_rq ? in bfq_select_queue()
4421 bfqq->bic->bfqq[0] : NULL; in bfq_select_queue()
4424 * The next three mutually-exclusive ifs decide in bfq_select_queue()
4436 * non-empty waker queue for bfqq, i.e., a queue whose in bfq_select_queue()
4469 * limit for bfqq is currently 0). in bfq_select_queue()
4473 * Thanks to the way the inject limit is updated in in bfq_select_queue()
4479 * I/O-plugging timeout fires. So one may deem the in bfq_select_queue()
4484 * inject limit may be too low to guarantee the same in bfq_select_queue()
4496 icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic && in bfq_select_queue()
4497 bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <= in bfq_select_queue()
4499 bfqq = bfqq->bic->bfqq[0]; in bfq_select_queue()
4501 bfq_bfqq_busy(bfqq->waker_bfqq) && in bfq_select_queue()
4502 bfqq->next_rq && in bfq_select_queue()
4503 bfq_serv_to_charge(bfqq->waker_bfqq->next_rq, in bfq_select_queue()
4504 bfqq->waker_bfqq) <= in bfq_select_queue()
4505 bfq_bfqq_budget_left(bfqq->waker_bfqq) in bfq_select_queue()
4507 bfqq = bfqq->waker_bfqq; in bfq_select_queue()
4509 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 || in bfq_select_queue()
4538 struct bfq_entity *entity = &bfqq->entity; in bfq_update_wr_data()
4540 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ in bfq_update_wr_data()
4543 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), in bfq_update_wr_data()
4544 jiffies_to_msecs(bfqq->wr_cur_max_time), in bfq_update_wr_data()
4545 bfqq->wr_coeff, in bfq_update_wr_data()
4546 bfqq->entity.weight, bfqq->entity.orig_weight); in bfq_update_wr_data()
4548 if (entity->prio_changed) in bfq_update_wr_data()
4554 * weight-raising period, then end weight raising. in bfq_update_wr_data()
4558 else if (time_is_before_jiffies(bfqq->last_wr_start_finish + in bfq_update_wr_data()
4559 bfqq->wr_cur_max_time)) { in bfq_update_wr_data()
4560 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || in bfq_update_wr_data()
4561 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + in bfq_update_wr_data()
4566 bfqq->entity.prio_changed = 1; in bfq_update_wr_data()
4569 if (bfqq->wr_coeff > 1 && in bfq_update_wr_data()
4570 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && in bfq_update_wr_data()
4571 bfqq->service_from_wr > max_service_from_wr) { in bfq_update_wr_data()
4584 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) in bfq_update_wr_data()
4595 struct request *rq = bfqq->next_rq; in bfq_dispatch_rq_from_bfqq()
4602 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) { in bfq_dispatch_rq_from_bfqq()
4603 bfqd->wait_dispatch = false; in bfq_dispatch_rq_from_bfqq()
4604 bfqd->waited_rq = rq; in bfq_dispatch_rq_from_bfqq()
4607 bfq_dispatch_remove(bfqd->queue, rq); in bfq_dispatch_rq_from_bfqq()
4609 if (bfqq != bfqd->in_service_queue) in bfq_dispatch_rq_from_bfqq()
4617 * weight-raised during this service slot, even if it has in bfq_dispatch_rq_from_bfqq()
4619 * weight-raised queue. This inflates bfqq's timestamps, which in bfq_dispatch_rq_from_bfqq()
4621 * device immediately to possible other weight-raised queues. in bfq_dispatch_rq_from_bfqq()
4641 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
4643 if (!atomic_read(&hctx->elevator_queued)) in bfq_has_work()
4647 * Avoiding lock: a race on bfqd->busy_queues should cause at in bfq_has_work()
4650 return !list_empty_careful(&bfqd->dispatch) || in bfq_has_work()
4656 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
4660 if (!list_empty(&bfqd->dispatch)) { in __bfq_dispatch_request()
4661 rq = list_first_entry(&bfqd->dispatch, struct request, in __bfq_dispatch_request()
4663 list_del_init(&rq->queuelist); in __bfq_dispatch_request()
4674 bfqq->dispatched++; in __bfq_dispatch_request()
4699 * being the frequency of non-elevator-private in __bfq_dispatch_request()
4723 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) in __bfq_dispatch_request()
4734 bfqd->rq_in_driver++; in __bfq_dispatch_request()
4736 rq->rq_flags |= RQF_STARTED; in __bfq_dispatch_request()
4766 spin_lock_irq(&q->queue_lock); in bfq_update_dispatch_stats()
4783 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); in bfq_update_dispatch_stats()
4785 spin_unlock_irq(&q->queue_lock); in bfq_update_dispatch_stats()
4796 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
4801 spin_lock_irq(&bfqd->lock); in bfq_dispatch_request()
4803 in_serv_queue = bfqd->in_service_queue; in bfq_dispatch_request()
4811 spin_unlock_irq(&bfqd->lock); in bfq_dispatch_request()
4813 bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, in bfq_dispatch_request()
4821 * in-flight on this queue also holds a reference, dropped when rq is freed.
4832 if (bfqq->bfqd) in bfq_put_queue()
4833 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", in bfq_put_queue()
4834 bfqq, bfqq->ref); in bfq_put_queue()
4836 bfqq->ref--; in bfq_put_queue()
4837 if (bfqq->ref) in bfq_put_queue()
4840 if (!hlist_unhashed(&bfqq->burst_list_node)) { in bfq_put_queue()
4841 hlist_del_init(&bfqq->burst_list_node); in bfq_put_queue()
4847 * bursts, when some short-lived process (often due to in bfq_put_queue()
4862 * the current burst list--without incrementing in bfq_put_queue()
4863 * burst_size--because of a split, but the current in bfq_put_queue()
4868 if (bfqq->bic && bfqq->bfqd->burst_size > 0) in bfq_put_queue()
4869 bfqq->bfqd->burst_size--; in bfq_put_queue()
4888 if (!hlist_unhashed(&bfqq->woken_list_node)) in bfq_put_queue()
4889 hlist_del_init(&bfqq->woken_list_node); in bfq_put_queue()
4892 hlist_for_each_entry_safe(item, n, &bfqq->woken_list, in bfq_put_queue()
4894 item->waker_bfqq = NULL; in bfq_put_queue()
4896 hlist_del_init(&item->woken_list_node); in bfq_put_queue()
4899 if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq) in bfq_put_queue()
4900 bfqq->bfqd->last_completed_rq_bfqq = NULL; in bfq_put_queue()
4915 __bfqq = bfqq->new_bfqq; in bfq_put_cooperator()
4919 next = __bfqq->new_bfqq; in bfq_put_cooperator()
4927 if (bfqq == bfqd->in_service_queue) { in bfq_exit_bfqq()
4932 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); in bfq_exit_bfqq()
4945 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ in bfq_exit_icq_bfqq()
4950 spin_lock_irqsave(&bfqd->lock, flags); in bfq_exit_icq_bfqq()
4951 bfqq->bic = NULL; in bfq_exit_icq_bfqq()
4954 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_exit_icq_bfqq()
4973 struct task_struct *tsk = current; in bfq_set_next_ioprio_data()
4975 struct bfq_data *bfqd = bfqq->bfqd; in bfq_set_next_ioprio_data()
4980 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); in bfq_set_next_ioprio_data()
4984 bdi_dev_name(bfqq->bfqd->queue->backing_dev_info), in bfq_set_next_ioprio_data()
4991 bfqq->new_ioprio = task_nice_ioprio(tsk); in bfq_set_next_ioprio_data()
4992 bfqq->new_ioprio_class = task_nice_ioclass(tsk); in bfq_set_next_ioprio_data()
4995 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); in bfq_set_next_ioprio_data()
4996 bfqq->new_ioprio_class = IOPRIO_CLASS_RT; in bfq_set_next_ioprio_data()
4999 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); in bfq_set_next_ioprio_data()
5000 bfqq->new_ioprio_class = IOPRIO_CLASS_BE; in bfq_set_next_ioprio_data()
5003 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; in bfq_set_next_ioprio_data()
5004 bfqq->new_ioprio = 7; in bfq_set_next_ioprio_data()
5008 if (bfqq->new_ioprio >= IOPRIO_BE_NR) { in bfq_set_next_ioprio_data()
5010 bfqq->new_ioprio); in bfq_set_next_ioprio_data()
5011 bfqq->new_ioprio = IOPRIO_BE_NR; in bfq_set_next_ioprio_data()
5014 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); in bfq_set_next_ioprio_data()
5015 bfqq->entity.prio_changed = 1; in bfq_set_next_ioprio_data()
5026 int ioprio = bic->icq.ioc->ioprio; in bfq_check_ioprio_change()
5032 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) in bfq_check_ioprio_change()
5035 bic->ioprio = ioprio; in bfq_check_ioprio_change()
5052 RB_CLEAR_NODE(&bfqq->entity.rb_node); in bfq_init_bfqq()
5053 INIT_LIST_HEAD(&bfqq->fifo); in bfq_init_bfqq()
5054 INIT_HLIST_NODE(&bfqq->burst_list_node); in bfq_init_bfqq()
5055 INIT_HLIST_NODE(&bfqq->woken_list_node); in bfq_init_bfqq()
5056 INIT_HLIST_HEAD(&bfqq->woken_list); in bfq_init_bfqq()
5058 bfqq->ref = 0; in bfq_init_bfqq()
5059 bfqq->bfqd = bfqd; in bfq_init_bfqq()
5079 bfqq->ttime.last_end_request = ktime_get_ns() + 1; in bfq_init_bfqq()
5083 bfqq->pid = pid; in bfq_init_bfqq()
5086 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; in bfq_init_bfqq()
5087 bfqq->budget_timeout = bfq_smallest_from_now(); in bfq_init_bfqq()
5089 bfqq->wr_coeff = 1; in bfq_init_bfqq()
5090 bfqq->last_wr_start_finish = jiffies; in bfq_init_bfqq()
5091 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); in bfq_init_bfqq()
5092 bfqq->split_time = bfq_smallest_from_now(); in bfq_init_bfqq()
5098 * to the current value of bfqq->soft_rt_next_start (see in bfq_init_bfqq()
5103 bfqq->soft_rt_next_start = jiffies; in bfq_init_bfqq()
5106 bfqq->seek_history = 1; in bfq_init_bfqq()
5115 return &bfqg->async_bfqq[0][ioprio]; in bfq_async_queue_prio()
5120 return &bfqg->async_bfqq[1][ioprio]; in bfq_async_queue_prio()
5122 return &bfqg->async_idle_bfqq; in bfq_async_queue_prio()
5132 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); in bfq_get_queue()
5133 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); in bfq_get_queue()
5142 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
5156 bfqd->queue->node); in bfq_get_queue()
5159 bfq_init_bfqq(bfqd, bfqq, bic, current->pid, in bfq_get_queue()
5161 bfq_init_entity(&bfqq->entity, bfqg); in bfq_get_queue()
5164 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
5174 bfqq->ref++; /* in bfq_get_queue()
5177 * only if bfqq->bfqg disappears, to in bfq_get_queue()
5182 bfqq, bfqq->ref); in bfq_get_queue()
5187 bfqq->ref++; /* get a process reference to this queue */ in bfq_get_queue()
5188 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); in bfq_get_queue()
5196 struct bfq_ttime *ttime = &bfqq->ttime; in bfq_update_io_thinktime()
5197 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; in bfq_update_io_thinktime()
5199 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); in bfq_update_io_thinktime()
5201 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; in bfq_update_io_thinktime()
5202 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); in bfq_update_io_thinktime()
5203 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, in bfq_update_io_thinktime()
5204 ttime->ttime_samples); in bfq_update_io_thinktime()
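/*
 * A small userspace example (not kernel code) of the exponential moving
 * average used above, with the same 7/8 decay and the +256/+128 rounding
 * biases. Assuming an 8 ms idle limit, three samples of 1 ms, 2 ms and
 * 20 ms are fed in; the third sample is first clipped to 2 * 8 ms = 16 ms
 * before entering the average.
 */
#include <stdio.h>
#include <stdint.h>

#define SLICE_IDLE_NS 8000000ULL	/* illustrative 8 ms */

struct ttime { uint64_t samples, total, mean; };

static void ttime_update(struct ttime *t, uint64_t elapsed_ns)
{
	if (elapsed_ns > 2 * SLICE_IDLE_NS)
		elapsed_ns = 2 * SLICE_IDLE_NS;	/* clip outliers */

	t->samples = (7 * t->samples + 256) / 8;
	t->total = (7 * t->total + 256 * elapsed_ns) / 8;
	t->mean = (t->total + 128) / t->samples;
}

int main(void)
{
	struct ttime t = { 0, 0, 0 };
	uint64_t s[] = { 1000000, 2000000, 20000000 };

	for (int i = 0; i < 3; i++) {
		ttime_update(&t, s[i]);
		printf("sample %d: mean %llu ns\n", i,
		       (unsigned long long)t.mean);
	}
	return 0;
}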
5211 bfqq->seek_history <<= 1; in bfq_update_io_seektime()
5212 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); in bfq_update_io_seektime()
5214 if (bfqq->wr_coeff > 1 && in bfq_update_io_seektime()
5215 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_update_io_seektime()
5232 bfqd->bfq_slice_idle == 0) in bfq_update_has_short_ttime()
5236 if (time_is_after_eq_jiffies(bfqq->split_time + in bfq_update_has_short_ttime()
5237 bfqd->bfq_wr_min_idle_time)) in bfq_update_has_short_ttime()
5244 if (atomic_read(&bic->icq.ioc->active_ref) == 0 || in bfq_update_has_short_ttime()
5245 (bfq_sample_valid(bfqq->ttime.ttime_samples) && in bfq_update_has_short_ttime()
5246 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) in bfq_update_has_short_ttime()
5258 * finally computed for bfqq, the inject limit does depend on in bfq_update_has_short_ttime()
5259 * the think-time state (short|long). In particular, the limit in bfq_update_has_short_ttime()
5263 * instructions reset the inject limit if the think-time state in bfq_update_has_short_ttime()
5268 * have elapsed since the last update of the inject limit, or in bfq_update_has_short_ttime()
5282 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O in bfq_update_has_short_ttime()
5287 * On the opposite end, a non-zero inject limit may allow the in bfq_update_has_short_ttime()
5292 * next think-time sample for bfqq may be very low. This in in bfq_update_has_short_ttime()
5297 * limit. Without injection, the blocking I/O would cause the in bfq_update_has_short_ttime()
5299 * inject limit to be raised again, and so on. The only effect in bfq_update_has_short_ttime()
5300 * of such a steady oscillation between the two think-time in bfq_update_has_short_ttime()
5303 * In contrast, if the inject limit is not reset during such a in bfq_update_has_short_ttime()
5309 * inject limit. The inject limit remains steadily equal to 1 in bfq_update_has_short_ttime()
5313 * An inject limit equal to 1 is however in conflict, in in bfq_update_has_short_ttime()
5326 * more frequently than once per I/O-plugging timeout, makes in bfq_update_has_short_ttime()
5330 * to boost throughput more effectively, by injecting the I/O in bfq_update_has_short_ttime()
5335 * limit before 100 ms is that, during this time interval, the in bfq_update_has_short_ttime()
5337 * finally computed for bfqq, freeing the inject limit from in bfq_update_has_short_ttime()
5340 if (state_changed && bfqq->last_serv_time_ns == 0 && in bfq_update_has_short_ttime()
5341 (time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_update_has_short_ttime()
5354 if (rq->cmd_flags & REQ_META) in bfq_rq_enqueued()
5355 bfqq->meta_pending++; in bfq_rq_enqueued()
5357 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_rq_enqueued()
5359 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { in bfq_rq_enqueued()
5360 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && in bfq_rq_enqueued()
5366 * - the request is small, and in bfq_rq_enqueued()
5367 * - we are idling to boost throughput, and in bfq_rq_enqueued()
5368 * - the queue is not to be expired, in bfq_rq_enqueued()
5372 * for a new request from the in-service queue, we in bfq_rq_enqueued()
5392 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_rq_enqueued()
5419 new_bfqq->allocated++; in __bfq_insert_request()
5420 bfqq->allocated--; in __bfq_insert_request()
5421 new_bfqq->ref++; in __bfq_insert_request()
5440 rq->elv.priv[1] = new_bfqq; in __bfq_insert_request()
5452 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; in __bfq_insert_request()
5453 list_add_tail(&rq->queuelist, &bfqq->fifo); in __bfq_insert_request()
5479 spin_lock_irq(&q->queue_lock); in bfq_update_insert_stats()
5483 spin_unlock_irq(&q->queue_lock); in bfq_update_insert_stats()
5495 struct request_queue *q = hctx->queue; in bfq_insert_request()
5496 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_insert_request()
5502 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio) in bfq_insert_request()
5505 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
5507 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
5511 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
5515 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
5519 list_add(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
5521 list_add_tail(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
5533 if (!q->last_merge) in bfq_insert_request()
5534 q->last_merge = rq; in bfq_insert_request()
5543 cmd_flags = rq->cmd_flags; in bfq_insert_request()
5545 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
5558 list_del_init(&rq->queuelist); in bfq_insert_requests()
5560 atomic_inc(&hctx->elevator_queued); in bfq_insert_requests()
5566 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_update_hw_tag()
5568 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, in bfq_update_hw_tag()
5569 bfqd->rq_in_driver); in bfq_update_hw_tag()
5571 if (bfqd->hw_tag == 1) in bfq_update_hw_tag()
5580 if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
5589 bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < in bfq_update_hw_tag()
5591 bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
5594 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) in bfq_update_hw_tag()
5597 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; in bfq_update_hw_tag()
5598 bfqd->max_rq_in_driver = 0; in bfq_update_hw_tag()
5599 bfqd->hw_tag_samples = 0; in bfq_update_hw_tag()
5601 bfqd->nonrot_with_queueing = in bfq_update_hw_tag()
5602 blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag; in bfq_update_hw_tag()
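/*
 * A minimal sketch (not kernel code) of the queueing-detection heuristic
 * above: record the maximum number of requests ever seen in the driver,
 * count samples only while the driver is actually being pushed past a
 * small threshold, and after enough samples decide whether the device
 * queues commands internally. The per-queue exception and the
 * recomputation of the "non-rotational with queueing" flag are omitted;
 * the numeric thresholds below are assumptions.
 */
#define HW_QUEUE_THRESHOLD 4	/* assumed value of BFQ_HW_QUEUE_THRESHOLD */
#define HW_QUEUE_SAMPLES   32	/* assumed value of BFQ_HW_QUEUE_SAMPLES */

struct hw_tag_state { int max_rq_in_driver, samples, hw_tag; };

static void hw_tag_sample(struct hw_tag_state *s, int rq_in_driver, int queued)
{
	if (rq_in_driver > s->max_rq_in_driver)
		s->max_rq_in_driver = rq_in_driver;

	if (s->hw_tag == 1)
		return;				/* already decided: queueing */
	if (rq_in_driver + queued <= HW_QUEUE_THRESHOLD)
		return;				/* device not being pushed */
	if (s->samples++ < HW_QUEUE_SAMPLES)
		return;

	s->hw_tag = s->max_rq_in_driver > HW_QUEUE_THRESHOLD;
	s->max_rq_in_driver = 0;
	s->samples = 0;
}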
5612 bfqd->rq_in_driver--; in bfq_completed_request()
5613 bfqq->dispatched--; in bfq_completed_request()
5615 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { in bfq_completed_request()
5619 * no outstanding request; used by the weight-raising in bfq_completed_request()
5622 bfqq->budget_timeout = jiffies; in bfq_completed_request()
5629 bfqq->ttime.last_end_request = now_ns; in bfq_completed_request()
5635 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); in bfq_completed_request()
5646 * - close the observation interval at the last (previous) in bfq_completed_request()
5648 * - compute rate, if possible, for that observation interval in bfq_completed_request()
5649 * - reset to zero samples, which will trigger a proper in bfq_completed_request()
5650 * re-initialization of the observation interval on next in bfq_completed_request()
5654 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < in bfq_completed_request()
5655 1UL<<(BFQ_RATE_SHIFT - 10)) in bfq_completed_request()
5657 bfqd->last_completion = now_ns; in bfq_completed_request()
5658 bfqd->last_completed_rq_bfqq = bfqq; in bfq_completed_request()
5669 * expires, if it still has in-flight requests. in bfq_completed_request()
5671 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && in bfq_completed_request()
5672 RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_completed_request()
5673 bfqq->wr_coeff != bfqd->bfq_wr_coeff) in bfq_completed_request()
5674 bfqq->soft_rt_next_start = in bfq_completed_request()
5678 * If this is the in-service queue, check if it needs to be expired, in bfq_completed_request()
5681 if (bfqd->in_service_queue == bfqq) { in bfq_completed_request()
5683 if (bfqq->dispatched == 0) in bfq_completed_request()
5692 * Here bfqq->dispatched > 0 holds, but in bfq_completed_request()
5695 * for bfqq before bfqq->dispatched reaches 0, in bfq_completed_request()
5697 * completion event that causes bfqq->dispatch in bfq_completed_request()
5700 * (I/O-dispatch plugging). in bfq_completed_request()
5704 * when bfqq->dispatched finally reaches in bfq_completed_request()
5712 else if (RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_completed_request()
5713 (bfqq->dispatched == 0 || in bfq_completed_request()
5719 if (!bfqd->rq_in_driver) in bfq_completed_request()
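/*
 * A small userspace example (not kernel code) of what the fixed-point
 * comparison against 1UL << (BFQ_RATE_SHIFT - 10) earlier in this
 * function computes. Rates are kept in sectors-per-usec scaled by
 * 2^BFQ_RATE_SHIFT, so the test asks whether even the largest recently
 * dispatched request, spread over the time elapsed since the previous
 * completion, would amount to less than 1/1024 sector/us, i.e. roughly
 * 0.5 MB/s with 512-byte sectors; if so, the current rate-observation
 * interval is closed. BFQ_RATE_SHIFT = 16 is an assumption here.
 */
#include <stdio.h>

#define RATE_SHIFT 16	/* assumed value of BFQ_RATE_SHIFT */

int main(void)
{
	unsigned long last_rq_max_size = 256;	/* sectors (128 KiB) */
	unsigned long delta_us = 400000;	/* 400 ms since last completion */

	unsigned long rate = (last_rq_max_size << RATE_SHIFT) / delta_us;
	unsigned long thresh = 1UL << (RATE_SHIFT - 10);

	printf("scaled rate %lu vs threshold %lu -> %s\n", rate, thresh,
	       rate < thresh ? "too slow: close the observation interval" :
			       "fast enough: keep accumulating samples");
	return 0;
}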
5725 bfqq->allocated--; in bfq_finish_requeue_request_body()
5739 * allowed to switch to another queue---because bfqq is sync and
5740 * I/O-dispatch needs to be plugged while bfqq is temporarily
5741 * empty---then, during the service of bfqq, there will be frequent
5753 * both boost throughput and not break bfqq's bandwidth and latency
5754 * guarantees. In this respect, the mechanism maintains a per-queue
5755 * inject limit, computed as below. While bfqq is empty, the injection
5757 * of I/O requests in flight---i.e., already dispatched but not yet
5758 * completed---remains lower than this limit.
5761 * which the inject limit is computed. We define as first request for
5763 * service, and causes bfqq to switch from empty to non-empty. The
5764 * algorithm updates the limit as a function of the effect of
5788 * The limit-update algorithm works as follows. On the arrival of a
5791 * case, it updates the limit as described below:
5793 * (1) If there is no in-flight request. This gives a baseline for the
5795 * not been computed yet, then, after computing it, the limit is
5801 * (2) If the limit is higher than 0 and there are in-flight
5804 * current value of the limit is inflating the total service
5807 * service guarantees, and the limit is even tentatively
5809 * limit is decreased. Due to the lack of any hysteresis, this
5810 * logic makes the limit oscillate even in steady workload
5812 * the best value for the limit, as a function of the current I/O
5814 * short time interval after the limit happens to be decreased.
5816 * (3) Periodically, after resetting the limit, to make sure that the
5817 * limit eventually drops in case the workload changes. This is
5818 * needed because, after the limit has gone safely up for a
5823 * injection only once, and then to reset/lower the limit only if
5824 * the total service time with the current limit does happen to be
5837 u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns; in bfq_update_inject_limit()
5838 unsigned int old_limit = bfqq->inject_limit; in bfq_update_inject_limit()
5840 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) { in bfq_update_inject_limit()
5841 u64 threshold = (bfqq->last_serv_time_ns * 3)>>1; in bfq_update_inject_limit()
5844 bfqq->inject_limit--; in bfq_update_inject_limit()
5845 bfqq->decrease_time_jif = jiffies; in bfq_update_inject_limit()
5847 old_limit <= bfqd->max_rq_in_driver) in bfq_update_inject_limit()
5848 bfqq->inject_limit++; in bfq_update_inject_limit()
5857 * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O in bfq_update_inject_limit()
5861 * bfqd->rq_in_driver is decremented in such a code path. in bfq_update_inject_limit()
5863 if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) || in bfq_update_inject_limit()
5864 tot_time_ns < bfqq->last_serv_time_ns) { in bfq_update_inject_limit()
5865 if (bfqq->last_serv_time_ns == 0) { in bfq_update_inject_limit()
5870 bfqq->inject_limit = max_t(unsigned int, 1, old_limit); in bfq_update_inject_limit()
5872 bfqq->last_serv_time_ns = tot_time_ns; in bfq_update_inject_limit()
5873 } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1) in bfq_update_inject_limit()
5883 bfqq->last_serv_time_ns = tot_time_ns; in bfq_update_inject_limit()
5887 bfqd->waited_rq = NULL; in bfq_update_inject_limit()
5888 bfqd->rqs_injected = false; in bfq_update_inject_limit()
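/*
 * A minimal sketch (not kernel code) of the limit-update rule applied
 * above once the first request after an empty period completes. The
 * baseline per-request service time gates the decision: if injection made
 * the measured service time exceed 1.5x the baseline, the limit is
 * lowered; if it stayed within the threshold and the driver was actually
 * filled up to the current limit, the limit is raised. The baseline
 * itself is (re)computed when the sample was taken with no other I/O in
 * flight, or when it beats the current baseline. Names are illustrative
 * and the decrease-timestamp bookkeeping is omitted.
 */
#include <stdint.h>

struct inject_state { unsigned int limit; uint64_t baseline_ns; };

static void update_inject_limit(struct inject_state *s, uint64_t tot_time_ns,
				unsigned int max_rq_in_driver,
				int rqs_injected, int rq_in_driver)
{
	unsigned int old_limit = s->limit;

	if (s->baseline_ns > 0 && rqs_injected) {
		uint64_t threshold = (s->baseline_ns * 3) >> 1;	/* 1.5x baseline */

		if (tot_time_ns >= threshold && old_limit > 0)
			s->limit--;
		else if (tot_time_ns < threshold &&
			 old_limit <= max_rq_in_driver)
			s->limit++;
	}

	if ((s->baseline_ns == 0 && rq_in_driver == 1) ||
	    tot_time_ns < s->baseline_ns) {
		if (s->baseline_ns == 0)
			s->limit = old_limit > 1 ? old_limit : 1;
		s->baseline_ns = tot_time_ns;
	} else if (!rqs_injected && rq_in_driver == 1)
		s->baseline_ns = tot_time_ns;
}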
5904 * requeued request that has not (yet) been re-inserted into in bfq_finish_requeue_request()
5907 if (!rq->elv.icq || !bfqq) in bfq_finish_requeue_request()
5910 bfqd = bfqq->bfqd; in bfq_finish_requeue_request()
5912 if (rq->rq_flags & RQF_STARTED) in bfq_finish_requeue_request()
5914 rq->start_time_ns, in bfq_finish_requeue_request()
5915 rq->io_start_time_ns, in bfq_finish_requeue_request()
5916 rq->cmd_flags); in bfq_finish_requeue_request()
5918 if (likely(rq->rq_flags & RQF_STARTED)) { in bfq_finish_requeue_request()
5921 spin_lock_irqsave(&bfqd->lock, flags); in bfq_finish_requeue_request()
5923 if (rq == bfqd->waited_rq) in bfq_finish_requeue_request()
5928 atomic_dec(&rq->mq_hctx->elevator_queued); in bfq_finish_requeue_request()
5930 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_finish_requeue_request()
5941 * current version of the code, this implies that the in bfq_finish_requeue_request()
5945 if (!RB_EMPTY_NODE(&rq->rb_node)) { in bfq_finish_requeue_request()
5946 bfq_remove_request(rq->q, rq); in bfq_finish_requeue_request()
5948 rq->cmd_flags); in bfq_finish_requeue_request()
5958 * design would be to prevent blk-mq from invoking the requeue in bfq_finish_requeue_request()
5963 * request-insertion logic if rq is re-inserted into a bfq in bfq_finish_requeue_request()
5964 * internal queue, without a re-preparation. Here we assume in bfq_finish_requeue_request()
5965 * that re-insertions of requeued requests, without in bfq_finish_requeue_request()
5966 * re-preparation, can happen only for pass_through or at_head in bfq_finish_requeue_request()
5967 * requests (which are not re-inserted into bfq internal in bfq_finish_requeue_request()
5970 rq->elv.priv[0] = NULL; in bfq_finish_requeue_request()
5971 rq->elv.priv[1] = NULL; in bfq_finish_requeue_request()
5975 * Removes the association between the current task and bfqq, assuming
5983 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); in bfq_split_bfqq()
5986 bfqq->pid = current->pid; in bfq_split_bfqq()
5996 bfq_release_process_ref(bfqq->bfqd, bfqq); in bfq_split_bfqq()
6008 if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) in bfq_get_bfqq_handle_split()
6020 if ((bic->was_in_burst_list && bfqd->large_burst) || in bfq_get_bfqq_handle_split()
6021 bic->saved_in_large_burst) in bfq_get_bfqq_handle_split()
6025 if (bic->was_in_burst_list) in bfq_get_bfqq_handle_split()
6027 * If bfqq was in the current in bfq_get_bfqq_handle_split()
6040 * current burst list is still in bfq_get_bfqq_handle_split()
6046 * bfqq to the current burst in bfq_get_bfqq_handle_split()
6054 hlist_add_head(&bfqq->burst_list_node, in bfq_get_bfqq_handle_split()
6055 &bfqd->burst_list); in bfq_get_bfqq_handle_split()
6057 bfqq->split_time = jiffies; in bfq_get_bfqq_handle_split()
6076 rq->elv.priv[0] = rq->elv.priv[1] = NULL; in bfq_prepare_request()
6104 struct request_queue *q = rq->q; in bfq_init_rq()
6105 struct bio *bio = rq->bio; in bfq_init_rq()
6106 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_init_rq()
6113 if (unlikely(!rq->elv.icq)) in bfq_init_rq()
6123 if (rq->elv.priv[1]) in bfq_init_rq()
6124 return rq->elv.priv[1]; in bfq_init_rq()
6126 bic = icq_to_bic(rq->elv.icq); in bfq_init_rq()
6142 bic->saved_in_large_burst = true; in bfq_init_rq()
6156 bfqq->allocated++; in bfq_init_rq()
6157 bfqq->ref++; in bfq_init_rq()
6159 rq, bfqq, bfqq->ref); in bfq_init_rq()
6161 rq->elv.priv[0] = bic; in bfq_init_rq()
6162 rq->elv.priv[1] = bfqq; in bfq_init_rq()
6166 * by only this bic: we can then set bfqq->bic = bic. in in bfq_init_rq()
6170 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { in bfq_init_rq()
6171 bfqq->bic = bic; in bfq_init_rq()
6186 * 1) A burst is actually happening (bfqd->burst_size > 0) in bfq_init_rq()
6192 * therefore in not weight-raising bfqq. See comments on in bfq_init_rq()
6204 (bfqd->burst_size > 0 || in bfq_init_rq()
6217 spin_lock_irqsave(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
6226 if (bfqq != bfqd->in_service_queue) { in bfq_idle_slice_timer_body()
6227 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
6240 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) in bfq_idle_slice_timer_body()
6244 * first request of the in-service queue arrives in bfq_idle_slice_timer_body()
6254 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
6259 * Handler of the expiration of the timer running if the in-service queue
6266 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_idle_slice_timer()
6269 * Theoretical race here: the in-service queue can be NULL or in bfq_idle_slice_timer()
6271 * arrives for the current queue and there is a full dispatch in bfq_idle_slice_timer()
6272 * cycle that changes the in-service queue. This can hardly in bfq_idle_slice_timer()
6289 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); in __bfq_put_async_bfqq()
6292 bfqq, bfqq->ref); in __bfq_put_async_bfqq()
6310 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); in bfq_put_async_queues()
6312 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); in bfq_put_async_queues()
6325 * In-word depths if no bfq_queue is being weight-raised: in bfq_update_depths()
6328 * In the next formulas, right-shift the value in bfq_update_depths()
6329 * (1U<<bt->sb.shift), instead of computing directly in bfq_update_depths()
6330 * (1U<<(bt->sb.shift - something)), to be robust against in bfq_update_depths()
6331 * any possible value of bt->sb.shift, without having to in bfq_update_depths()
6332 * limit 'something'. in bfq_update_depths()
6335 bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U); in bfq_update_depths()
6341 bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U); in bfq_update_depths()
6344 * In-word depths in case some bfq_queue is being weight- in bfq_update_depths()
6347 * start-up times didn't suffer from any regression due to tag in bfq_update_depths()
6351 bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U); in bfq_update_depths()
6353 bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U); in bfq_update_depths()
6357 min_shallow = min(min_shallow, bfqd->word_depths[i][j]); in bfq_update_depths()
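/*
 * A small userspace example (not kernel code) of the four depth limits
 * above, evaluated for an illustrative tag map of 64 tags (sb.shift == 6),
 * which yields 32, 48, 12 and 24 respectively. The first index matches
 * the two cases commented above (no queue weight-raised vs. some queue
 * weight-raised); how the second index is selected per request is decided
 * in the depth-limiting hook, which is not shown here.
 */
#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int shift = 6;		/* 1 << 6 = 64 tags, illustrative */
	unsigned int depths[2][2];

	depths[0][0] = umax((1U << shift) >> 1, 1U);		/* no wr, [..][0] */
	depths[0][1] = umax(((1U << shift) * 3) >> 2, 1U);	/* no wr, [..][1] */
	depths[1][0] = umax(((1U << shift) * 3) >> 4, 1U);	/* wr,    [..][0] */
	depths[1][1] = umax(((1U << shift) * 6) >> 4, 1U);	/* wr,    [..][1] */

	printf("no-wr: %u %u | wr: %u %u\n",
	       depths[0][0], depths[0][1], depths[1][0], depths[1][1]);
	return 0;
}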
6364 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_depth_updated()
6365 struct blk_mq_tags *tags = hctx->sched_tags; in bfq_depth_updated()
6368 min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags); in bfq_depth_updated()
6369 sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow); in bfq_depth_updated()
6380 struct bfq_data *bfqd = e->elevator_data; in bfq_exit_queue()
6383 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
6385 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
6386 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) in bfq_exit_queue()
6388 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
6390 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
6392 /* release oom-queue reference to root group */ in bfq_exit_queue()
6393 bfqg_and_blkg_put(bfqd->root_group); in bfq_exit_queue()
6396 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); in bfq_exit_queue()
6398 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
6399 bfq_put_async_queues(bfqd, bfqd->root_group); in bfq_exit_queue()
6400 kfree(bfqd->root_group); in bfq_exit_queue()
6401 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
6413 root_group->entity.parent = NULL; in bfq_init_root_group()
6414 root_group->my_entity = NULL; in bfq_init_root_group()
6415 root_group->bfqd = bfqd; in bfq_init_root_group()
6417 root_group->rq_pos_tree = RB_ROOT; in bfq_init_root_group()
6419 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; in bfq_init_root_group()
6420 root_group->sched_data.bfq_class_idle_last_service = jiffies; in bfq_init_root_group()
6430 return -ENOMEM; in bfq_init_queue()
6432 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); in bfq_init_queue()
6434 kobject_put(&eq->kobj); in bfq_init_queue()
6435 return -ENOMEM; in bfq_init_queue()
6437 eq->elevator_data = bfqd; in bfq_init_queue()
6439 spin_lock_irq(&q->queue_lock); in bfq_init_queue()
6440 q->elevator = eq; in bfq_init_queue()
6441 spin_unlock_irq(&q->queue_lock); in bfq_init_queue()
6448 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); in bfq_init_queue()
6449 bfqd->oom_bfqq.ref++; in bfq_init_queue()
6450 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; in bfq_init_queue()
6451 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; in bfq_init_queue()
6452 bfqd->oom_bfqq.entity.new_weight = in bfq_init_queue()
6453 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); in bfq_init_queue()
6456 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); in bfq_init_queue()
6463 bfqd->oom_bfqq.entity.prio_changed = 1; in bfq_init_queue()
6465 bfqd->queue = q; in bfq_init_queue()
6467 INIT_LIST_HEAD(&bfqd->dispatch); in bfq_init_queue()
6469 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, in bfq_init_queue()
6471 bfqd->idle_slice_timer.function = bfq_idle_slice_timer; in bfq_init_queue()
6473 bfqd->queue_weights_tree = RB_ROOT_CACHED; in bfq_init_queue()
6474 bfqd->num_groups_with_pending_reqs = 0; in bfq_init_queue()
6476 INIT_LIST_HEAD(&bfqd->active_list); in bfq_init_queue()
6477 INIT_LIST_HEAD(&bfqd->idle_list); in bfq_init_queue()
6478 INIT_HLIST_HEAD(&bfqd->burst_list); in bfq_init_queue()
6480 bfqd->hw_tag = -1; in bfq_init_queue()
6481 bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue); in bfq_init_queue()
6483 bfqd->bfq_max_budget = bfq_default_max_budget; in bfq_init_queue()
6485 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; in bfq_init_queue()
6486 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; in bfq_init_queue()
6487 bfqd->bfq_back_max = bfq_back_max; in bfq_init_queue()
6488 bfqd->bfq_back_penalty = bfq_back_penalty; in bfq_init_queue()
6489 bfqd->bfq_slice_idle = bfq_slice_idle; in bfq_init_queue()
6490 bfqd->bfq_timeout = bfq_timeout; in bfq_init_queue()
6492 bfqd->bfq_requests_within_timer = 120; in bfq_init_queue()
6494 bfqd->bfq_large_burst_thresh = 8; in bfq_init_queue()
6495 bfqd->bfq_burst_interval = msecs_to_jiffies(180); in bfq_init_queue()
6497 bfqd->low_latency = true; in bfq_init_queue()
6500 * Trade-off between responsiveness and fairness. in bfq_init_queue()
6502 bfqd->bfq_wr_coeff = 30; in bfq_init_queue()
6503 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); in bfq_init_queue()
6504 bfqd->bfq_wr_max_time = 0; in bfq_init_queue()
6505 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); in bfq_init_queue()
6506 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); in bfq_init_queue()
6507 bfqd->bfq_wr_max_softrt_rate = 7000; /* in bfq_init_queue()
6510 * high-definition compressed in bfq_init_queue()
6513 bfqd->wr_busy_queues = 0; in bfq_init_queue()
6519 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * in bfq_init_queue()
6520 ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; in bfq_init_queue()
6521 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; in bfq_init_queue()
6523 spin_lock_init(&bfqd->lock); in bfq_init_queue()
6528 * (bfq_create_group_hierarchy->blkcg_activate_policy-> in bfq_init_queue()
6536 * other inconsistencies, the blk-mq stack must then refrain in bfq_init_queue()
6540 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); in bfq_init_queue()
6541 if (!bfqd->root_group) in bfq_init_queue()
6543 bfq_init_root_group(bfqd->root_group, bfqd); in bfq_init_queue()
6544 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); in bfq_init_queue()
6551 kobject_put(&eq->kobj); in bfq_init_queue()
6552 return -ENOMEM; in bfq_init_queue()
6564 return -ENOMEM; in bfq_slab_setup()
6587 struct bfq_data *bfqd = e->elevator_data; \
6595 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
6596 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
6597 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
6598 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
6599 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
6600 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
6601 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
6602 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
6603 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
6609 struct bfq_data *bfqd = e->elevator_data; \
6614 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
6621 struct bfq_data *bfqd = e->elevator_data; \
6640 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
6642 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
6644 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
6645 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
6647 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
6653 struct bfq_data *bfqd = e->elevator_data; \
6667 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
6674 struct bfq_data *bfqd = e->elevator_data; in bfq_max_budget_store()
6683 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_max_budget_store()
6687 bfqd->bfq_max_budget = __data; in bfq_max_budget_store()
6690 bfqd->bfq_user_max_budget = __data; in bfq_max_budget_store()
6702 struct bfq_data *bfqd = e->elevator_data; in bfq_timeout_sync_store()
6715 bfqd->bfq_timeout = msecs_to_jiffies(__data); in bfq_timeout_sync_store()
6716 if (bfqd->bfq_user_max_budget == 0) in bfq_timeout_sync_store()
6717 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_timeout_sync_store()
6725 struct bfq_data *bfqd = e->elevator_data; in bfq_strict_guarantees_store()
6735 if (!bfqd->strict_guarantees && __data == 1 in bfq_strict_guarantees_store()
6736 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) in bfq_strict_guarantees_store()
6737 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; in bfq_strict_guarantees_store()
6739 bfqd->strict_guarantees = __data; in bfq_strict_guarantees_store()
6747 struct bfq_data *bfqd = e->elevator_data; in bfq_low_latency_store()
6757 if (__data == 0 && bfqd->low_latency != 0) in bfq_low_latency_store()
6759 bfqd->low_latency = __data; in bfq_low_latency_store()
6810 MODULE_ALIAS("bfq-iosched");
6822 ret = -ENOMEM; in bfq_init()
6836 * scheduler cannot rely on a peak-rate-evaluation workload to in bfq_init()