Lines matching "write", "-1", "bps" in block/blk-throttle.c
1 // SPDX-License-Identifier: GPL-2.0
14 #include "blk-cgroup-rwstat.h"
15 #include "blk-stat.h"
16 #include "blk-throttle.h"
18 /* Max dispatch from a group in 1 round */
41 /* Total number of queued bios on READ and WRITE lists */
56 return pd_to_blkg(&tg->pd); in tg_to_blkg()
60 * sq_to_tg - return the throtl_grp the specified service queue belongs to
63 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
68 if (sq && sq->parent_sq) in sq_to_tg()
75 * sq_to_td - return the throtl_data the specified service queue belongs to
86 return tg->td; in sq_to_td()
95 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_bps_limit()
98 return tg->bps[rw]; in tg_bps_limit()
105 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_iops_limit()
108 return tg->iops[rw]; in tg_iops_limit()
112 * throtl_log - log debug message via blktrace
125 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
128 blk_add_cgroup_trace_msg(__td->queue, \
129 &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
131 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
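/*
 * Example (hypothetical call site, for illustration only): emit a
 * throttling message for a service queue. The arguments follow printk
 * format conventions, and the message is dropped early when blktrace
 * message logging is not enabled for the queue.
 *
 *	throtl_log(sq, "queued read=%u write=%u",
 *		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 */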
140 return bio->bi_iter.bi_size; in throtl_bio_data_size()
145 INIT_LIST_HEAD(&qn->node); in throtl_qnode_init()
146 bio_list_init(&qn->bios); in throtl_qnode_init()
147 qn->tg = tg; in throtl_qnode_init()
151 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
154 * @queued: the service_queue->queued[] list @qn belongs to
157 * @qn->tg's reference count is bumped when @qn is activated. See the
163 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
164 if (list_empty(&qn->node)) { in throtl_qnode_add_bio()
165 list_add_tail(&qn->node, queued); in throtl_qnode_add_bio()
166 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
171 * throtl_peek_queued - peek the first bio on a qnode list
183 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
189 * throtl_pop_queued - pop the first bio from a qnode list
195 * that the popping order is round-robin.
212 bio = bio_list_pop(&qn->bios); in throtl_pop_queued()
215 if (bio_list_empty(&qn->bios)) { in throtl_pop_queued()
216 list_del_init(&qn->node); in throtl_pop_queued()
218 *tg_to_put = qn->tg; in throtl_pop_queued()
220 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
222 list_move_tail(&qn->node, queued); in throtl_pop_queued()
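/*
 * Worked example of the round-robin behavior above (illustrative):
 * with two active qnodes A = {a1, a2} and B = {b1} on @queued,
 * successive throtl_pop_queued() calls return a1, b1, a2 -- after a1
 * is popped, A still holds a bio, so it is rotated to the tail behind
 * B rather than drained in one go.
 */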
231 INIT_LIST_HEAD(&sq->queued[READ]); in throtl_service_queue_init()
232 INIT_LIST_HEAD(&sq->queued[WRITE]); in throtl_service_queue_init()
233 sq->pending_tree = RB_ROOT_CACHED; in throtl_service_queue_init()
234 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); in throtl_service_queue_init()
243 tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id); in throtl_pd_alloc()
247 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
250 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
253 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
255 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
256 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
257 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
260 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
261 tg->bps[READ] = U64_MAX; in throtl_pd_alloc()
262 tg->bps[WRITE] = U64_MAX; in throtl_pd_alloc()
263 tg->iops[READ] = UINT_MAX; in throtl_pd_alloc()
264 tg->iops[WRITE] = UINT_MAX; in throtl_pd_alloc()
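	/*
	 * The U64_MAX/UINT_MAX defaults above mean "no limit";
	 * tg_prfill_limit() reports them back as "max" in io.max.
	 */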
266 return &tg->pd; in throtl_pd_alloc()
269 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_alloc()
279 struct throtl_data *td = blkg->q->td; in throtl_pd_init()
280 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
286 * read_bps limit is set on a parent group, summary bps of in throtl_pd_init()
296 sq->parent_sq = &td->service_queue; in throtl_pd_init()
297 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) in throtl_pd_init()
298 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; in throtl_pd_init()
299 tg->td = td; in throtl_pd_init()
309 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
312 for (rw = READ; rw <= WRITE; rw++) { in tg_update_has_rules()
313 tg->has_rules_iops[rw] = in tg_update_has_rules()
314 (parent_tg && parent_tg->has_rules_iops[rw]) || in tg_update_has_rules()
316 tg->has_rules_bps[rw] = in tg_update_has_rules()
317 (parent_tg && parent_tg->has_rules_bps[rw]) || in tg_update_has_rules()
336 timer_delete_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
337 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_free()
338 blkg_rwstat_exit(&tg->stat_ios); in throtl_pd_free()
347 n = rb_first_cached(&parent_sq->pending_tree); in throtl_rb_first()
357 rb_erase_cached(n, &parent_sq->pending_tree); in throtl_rb_erase()
369 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
374 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
375 struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node; in tg_service_queue_add()
378 unsigned long key = tg->disptime; in tg_service_queue_add()
385 if (time_before(key, __tg->disptime)) in tg_service_queue_add()
386 node = &parent->rb_left; in tg_service_queue_add()
388 node = &parent->rb_right; in tg_service_queue_add()
393 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
394 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree, in tg_service_queue_add()
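	/*
	 * Note: @key is a jiffies value, so the tree order is established
	 * with time_before() above, which stays correct across jiffies
	 * wraparound.
	 */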
400 if (!(tg->flags & THROTL_TG_PENDING)) { in throtl_enqueue_tg()
402 tg->flags |= THROTL_TG_PENDING; in throtl_enqueue_tg()
403 tg->service_queue.parent_sq->nr_pending++; in throtl_enqueue_tg()
409 if (tg->flags & THROTL_TG_PENDING) { in throtl_dequeue_tg()
411 tg->service_queue.parent_sq; in throtl_dequeue_tg()
413 throtl_rb_erase(&tg->rb_node, parent_sq); in throtl_dequeue_tg()
414 --parent_sq->nr_pending; in throtl_dequeue_tg()
415 tg->flags &= ~THROTL_TG_PENDING; in throtl_dequeue_tg()
423 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice; in throtl_schedule_pending_timer()
434 mod_timer(&sq->pending_timer, expires); in throtl_schedule_pending_timer()
436 expires - jiffies, jiffies); in throtl_schedule_pending_timer()
440 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
444 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
454 * delay before dispatch starts even if @sq->first_pending_disptime is not
461 if (!sq->nr_pending) in throtl_schedule_next_dispatch()
467 if (force || time_after(sq->first_pending_disptime, jiffies)) { in throtl_schedule_next_dispatch()
468 throtl_schedule_pending_timer(sq, sq->first_pending_disptime); in throtl_schedule_next_dispatch()
479 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
480 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
488 if (time_after(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
489 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
491 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
492 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
494 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
495 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
502 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
503 tg->io_disp[rw] = 0; in throtl_start_new_slice()
505 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
506 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
508 throtl_log(&tg->service_queue, in throtl_start_new_slice()
510 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
511 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
517 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
524 throtl_log(&tg->service_queue, in throtl_extend_slice()
526 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
527 tg->slice_end[rw], jiffies); in throtl_extend_slice()
533 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
547 * 1, then at most the elapsed jiffies should be equivalent to 1 second, as we in calculate_io_allowed()
548 * will allow dispatch after 1 second, and after that the slice should in calculate_io_allowed()
569 if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62) in calculate_bytes_allowed()
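/*
 * Worked example for the overflow guard above, assuming HZ == 1000:
 * bps_limit = 1 MiB/s (2^20) and jiffy_elapsed = 500 give
 * ilog2(2^20) + ilog2(500) - ilog2(1000) = 20 + 8 - 9 = 19 <= 62,
 * so bps_limit * jiffy_elapsed / HZ (512 KiB) fits comfortably in 64
 * bits; the guard only trips when the product could overflow.
 */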
581 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
584 * If bps is unlimited (-1), then the time slice doesn't get in throtl_trim_slice()
598 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
600 time_elapsed = rounddown(jiffies - tg->slice_start[rw], in throtl_trim_slice()
601 tg->td->throtl_slice); in throtl_trim_slice()
603 if (time_elapsed < tg->td->throtl_slice * 2) in throtl_trim_slice()
614 time_elapsed -= tg->td->throtl_slice; in throtl_trim_slice()
621 if ((long long)tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
622 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
624 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
626 if ((int)tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
627 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
629 tg->io_disp[rw] = 0; in throtl_trim_slice()
631 tg->slice_start[rw] += time_elapsed; in throtl_trim_slice()
633 throtl_log(&tg->service_queue, in throtl_trim_slice()
635 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice, in throtl_trim_slice()
636 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw], in throtl_trim_slice()
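/*
 * Worked trim example (illustrative, assuming throtl_slice = 100ms and
 * HZ == 1000): with bps = 10 MiB/s and 250ms elapsed since slice_start,
 * time_elapsed rounds down to 200ms, one slice is retained, and the
 * remaining 100ms is trimmed: bytes_trim = 1 MiB is subtracted from
 * bytes_disp and slice_start advances by 100ms, so an idle group
 * cannot bank unbounded credit.
 */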
643 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; in __tg_update_carryover()
654 *bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) - in __tg_update_carryover()
655 tg->bytes_disp[rw]; in __tg_update_carryover()
657 *ios = calculate_io_allowed(iops_limit, jiffy_elapsed) - in __tg_update_carryover()
658 tg->io_disp[rw]; in __tg_update_carryover()
659 tg->bytes_disp[rw] -= *bytes; in __tg_update_carryover()
660 tg->io_disp[rw] -= *ios; in __tg_update_carryover()
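/*
 * Carryover example (illustrative numbers): with iops_limit = 100 and
 * jiffy_elapsed = HZ (one second), calculate_io_allowed() returns 100;
 * if only 70 IOs were dispatched, *ios = 30 and io_disp drops to 40,
 * so the 30 unused IOs remain available after the limit is changed.
 */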
668 if (tg->service_queue.nr_queued[READ]) in tg_update_carryover()
670 if (tg->service_queue.nr_queued[WRITE]) in tg_update_carryover()
671 __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]); in tg_update_carryover()
674 throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__, in tg_update_carryover()
675 bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]); in tg_update_carryover()
689 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_within_iops_limit()
692 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_within_iops_limit()
694 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed) in tg_within_iops_limit()
698 jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed; in tg_within_iops_limit()
701 jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1); in tg_within_iops_limit()
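	/*
	 * The max() above guarantees at least one inter-IO interval plus
	 * one jiffy for rounding: at iops_limit = 100 and HZ == 1000,
	 * that is a floor of 11 jiffies.
	 */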
719 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_within_bps_limit()
723 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_within_bps_limit()
725 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_within_bps_limit()
727 if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) in tg_within_bps_limit()
731 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_within_bps_limit()
735 jiffy_wait = 1; in tg_within_bps_limit()
741 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed); in tg_within_bps_limit()
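/*
 * Wait-time example (assuming HZ == 1000): with bps_limit = 1 MiB/s,
 * bytes_disp = 768 KiB, a 512 KiB bio and bytes_allowed = 1 MiB,
 * extra_bytes = 256 KiB and the wait is 256 KiB * HZ / bps_limit =
 * 250 jiffies (250ms), plus the slice rounding adjustment added above.
 */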
747 * of jiffies to wait before this bio is within the IO rate and can be dispatched
763 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
764 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
766 /* If tg->bps == -1, then BW is unlimited */ in tg_may_dispatch()
768 tg->flags & THROTL_TG_CANCELING) { in tg_may_dispatch()
781 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
784 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
785 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
787 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
803 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
816 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
818 tg->io_disp[rw]++; in throtl_charge_bio()
822 * throtl_add_bio_tg - add a bio to the specified throtl_grp
828 * tg->qnode_on_self[] is used.
833 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
837 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
845 if (!sq->nr_queued[rw]) in throtl_add_bio_tg()
846 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
848 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); in throtl_add_bio_tg()
850 sq->nr_queued[rw]++; in throtl_add_bio_tg()
856 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
857 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; in tg_update_disptime()
860 bio = throtl_peek_queued(&sq->queued[READ]); in tg_update_disptime()
864 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
872 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in tg_update_disptime()
873 tg->disptime = disptime; in tg_update_disptime()
877 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
885 child_tg->slice_start[rw]); in start_parent_slice_with_credit()
892 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
893 struct throtl_service_queue *parent_sq = sq->parent_sq; in tg_dispatch_one_bio()
904 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); in tg_dispatch_one_bio()
905 sq->nr_queued[rw]--; in tg_dispatch_one_bio()
912 * @td->service_queue, @bio is ready to be issued. Put it on its in tg_dispatch_one_bio()
917 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
921 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
922 &parent_sq->queued[rw]); in tg_dispatch_one_bio()
923 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
924 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
935 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
938 unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads; in throtl_dispatch_tg()
943 while ((bio = throtl_peek_queued(&sq->queued[READ])) && in throtl_dispatch_tg()
953 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
956 tg_dispatch_one_bio(tg, WRITE); in throtl_dispatch_tg()
970 while (1) { in throtl_select_dispatch()
974 if (!parent_sq->nr_pending) in throtl_select_dispatch()
981 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
986 sq = &tg->service_queue; in throtl_select_dispatch()
987 if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) in throtl_select_dispatch()
1000 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1011 * the top-level service_tree is reached, throtl_data->dispatch_work is
1026 q = tg->pd.blkg->q; in throtl_pending_timer_fn()
1028 q = td->queue; in throtl_pending_timer_fn()
1030 spin_lock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1032 if (!q->root_blkg) in throtl_pending_timer_fn()
1036 parent_sq = sq->parent_sq; in throtl_pending_timer_fn()
1040 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", in throtl_pending_timer_fn()
1041 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
1042 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
1054 spin_unlock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1056 spin_lock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1064 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1074 /* reached the top-level, queue issuing */ in throtl_pending_timer_fn()
1075 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1078 spin_unlock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1082 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1086 * of throtl_data->service_queue. Those bios are ready and issued by this
1093 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1094 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1102 spin_lock_irq(&q->queue_lock); in blk_throtl_dispatch_work_fn()
1103 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1104 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) in blk_throtl_dispatch_work_fn()
1106 spin_unlock_irq(&q->queue_lock); in blk_throtl_dispatch_work_fn()
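/*
 * Putting the two functions above together, a throttled bio climbs the
 * hierarchy one level per timer run:
 *
 *	tg->service_queue  ->  parent sq  ->  ...  ->  td->service_queue
 *	    (pending_timer at each level)              (dispatch_work)
 *
 * and is finally reissued from blk_throtl_dispatch_work_fn().
 */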
1141 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_conf_u64()
1148 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_conf_uint()
1154 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1158 throtl_log(&tg->service_queue, in tg_conf_updated()
1160 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1161 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1169 * blk-throttle. in tg_conf_updated()
1172 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1177 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent || in tg_conf_updated()
1178 !blkg->parent->parent) in tg_conf_updated()
1192 throtl_start_new_slice(tg, WRITE, false); in tg_conf_updated()
1194 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1196 throtl_schedule_next_dispatch(sq->parent_sq, true); in tg_conf_updated()
1202 struct request_queue *q = disk->queue; in blk_throtl_init()
1207 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
1209 return -ENOMEM; in blk_throtl_init()
1211 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
1212 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
1218 memflags = blk_mq_freeze_queue(disk->queue); in blk_throtl_init()
1219 blk_mq_quiesce_queue(disk->queue); in blk_throtl_init()
1221 q->td = td; in blk_throtl_init()
1222 td->queue = q; in blk_throtl_init()
1227 q->td = NULL; in blk_throtl_init()
1233 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_init()
1235 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_init()
1236 td->track_bio_latency = !queue_is_mq(q); in blk_throtl_init()
1237 if (!td->track_bio_latency) in blk_throtl_init()
1241 blk_mq_unquiesce_queue(disk->queue); in blk_throtl_init()
1242 blk_mq_unfreeze_queue(disk->queue, memflags); in blk_throtl_init()
1263 if (!blk_throtl_activated(ctx.bdev->bd_queue)) { in tg_set_conf()
1264 ret = blk_throtl_init(ctx.bdev->bd_disk); in tg_set_conf()
1273 ret = -EINVAL; in tg_set_conf()
1274 if (sscanf(ctx.body, "%llu", &v) != 1) in tg_set_conf()
1283 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1285 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1310 seq_cft(sf)->private, true); in tg_print_rwstat()
1328 seq_cft(sf)->private, true); in tg_print_rwstat_recursive()
1335 .private = offsetof(struct throtl_grp, bps[READ]),
1337 .write = tg_set_conf_u64,
1341 .private = offsetof(struct throtl_grp, bps[WRITE]),
1343 .write = tg_set_conf_u64,
1349 .write = tg_set_conf_uint,
1353 .private = offsetof(struct throtl_grp, iops[WRITE]),
1355 .write = tg_set_conf_uint,
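/*
 * Why .private stores an offsetof(): one pair of handlers can service
 * all four limit files by computing the field address at run time, as
 * tg_set_conf() does above. A minimal userspace sketch of the same
 * pattern (illustrative names, not kernel code):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	struct grp { uint64_t bps[2]; unsigned int iops[2]; };
 *
 *	static void set_u64(struct grp *g, size_t off, uint64_t v)
 *	{
 *		*(uint64_t *)((char *)g + off) = v;
 *	}
 *
 *	// e.g. set_u64(&g, offsetof(struct grp, bps[1]), 1048576);
 */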
1384 const char *dname = blkg_dev_name(pd->blkg); in tg_prfill_limit()
1394 if (tg->bps[READ] == bps_dft && in tg_prfill_limit()
1395 tg->bps[WRITE] == bps_dft && in tg_prfill_limit()
1396 tg->iops[READ] == iops_dft && in tg_prfill_limit()
1397 tg->iops[WRITE] == iops_dft) in tg_prfill_limit()
1401 if (tg->bps[READ] == U64_MAX) in tg_prfill_limit()
1404 seq_printf(sf, " rbps=%llu", tg->bps[READ]); in tg_prfill_limit()
1406 if (tg->bps[WRITE] == U64_MAX) in tg_prfill_limit()
1409 seq_printf(sf, " wbps=%llu", tg->bps[WRITE]); in tg_prfill_limit()
1411 if (tg->iops[READ] == UINT_MAX) in tg_prfill_limit()
1414 seq_printf(sf, " riops=%u", tg->iops[READ]); in tg_prfill_limit()
1416 if (tg->iops[WRITE] == UINT_MAX) in tg_prfill_limit()
1419 seq_printf(sf, " wiops=%u", tg->iops[WRITE]); in tg_prfill_limit()
1428 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_limit()
1447 if (!blk_throtl_activated(ctx.bdev->bd_queue)) { in tg_set_limit()
1448 ret = blk_throtl_init(ctx.bdev->bd_disk); in tg_set_limit()
1460 v[0] = tg->bps[READ]; in tg_set_limit()
1461 v[1] = tg->bps[WRITE]; in tg_set_limit()
1462 v[2] = tg->iops[READ]; in tg_set_limit()
1463 v[3] = tg->iops[WRITE]; in tg_set_limit()
1471 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1) in tg_set_limit()
1477 ret = -EINVAL; in tg_set_limit()
1480 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max"))) in tg_set_limit()
1483 ret = -ERANGE; in tg_set_limit()
1487 ret = -EINVAL; in tg_set_limit()
1491 v[1] = val; in tg_set_limit()
1500 tg->bps[READ] = v[0]; in tg_set_limit()
1501 tg->bps[WRITE] = v[1]; in tg_set_limit()
1502 tg->iops[READ] = v[2]; in tg_set_limit()
1503 tg->iops[WRITE] = v[3]; in tg_set_limit()
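	/*
	 * Interface example: the body parsed above follows the cgroup-v2
	 * io.max syntax, e.g.
	 *
	 *	echo "8:16 rbps=2097152 wiops=120" > io.max
	 *
	 * where any of rbps/wbps/riops/wiops may also be "max" (no limit).
	 */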
1517 .write = tg_set_limit,
1524 struct throtl_data *td = q->td; in throtl_shutdown_wq()
1526 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1531 struct throtl_service_queue *sq = &tg->service_queue; in tg_flush_bios()
1533 if (tg->flags & THROTL_TG_CANCELING) in tg_flush_bios()
1539 tg->flags |= THROTL_TG_CANCELING; in tg_flush_bios()
1548 if (!(tg->flags & THROTL_TG_PENDING)) in tg_flush_bios()
1557 throtl_schedule_pending_timer(sq, jiffies + 1); in tg_flush_bios()
1578 struct request_queue *q = disk->queue; in blk_throtl_cancel_bios()
1585 spin_lock_irq(&q->queue_lock); in blk_throtl_cancel_bios()
1592 blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) { in blk_throtl_cancel_bios()
1604 spin_unlock_irq(&q->queue_lock); in blk_throtl_cancel_bios()
1609 /* throtl is FIFO - if bios are already queued, should queue */ in tg_within_limit()
1610 if (tg->service_queue.nr_queued[rw]) in tg_within_limit()
1618 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __blk_throtl_bio()
1619 struct blkcg_gq *blkg = bio->bi_blkg; in __blk_throtl_bio()
1625 struct throtl_data *td = tg->td; in __blk_throtl_bio()
1628 spin_lock_irq(&q->queue_lock); in __blk_throtl_bio()
1629 sq = &tg->service_queue; in __blk_throtl_bio()
1668 qn = &tg->qnode_on_parent[rw]; in __blk_throtl_bio()
1669 sq = sq->parent_sq; in __blk_throtl_bio()
1677 /* out-of-limit, queue to @tg */ in __blk_throtl_bio()
1678 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", in __blk_throtl_bio()
1680 tg->bytes_disp[rw], bio->bi_iter.bi_size, in __blk_throtl_bio()
1682 tg->io_disp[rw], tg_iops_limit(tg, rw), in __blk_throtl_bio()
1683 sq->nr_queued[READ], sq->nr_queued[WRITE]); in __blk_throtl_bio()
1685 td->nr_queued[rw]++; in __blk_throtl_bio()
1695 if (tg->flags & THROTL_TG_WAS_EMPTY) { in __blk_throtl_bio()
1697 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in __blk_throtl_bio()
1701 spin_unlock_irq(&q->queue_lock); in __blk_throtl_bio()
1709 struct request_queue *q = disk->queue; in blk_throtl_exit()
1714 timer_delete_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
1717 kfree(q->td); in blk_throtl_exit()