1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Common Block IO controller cgroup interface
4 *
5 * Based on ideas and code from CFQ, CFS and BFQ:
6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
7 *
8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
9 * Paolo Valente <paolo.valente@unimore.it>
10 *
11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
12 * Nauman Rafique <nauman@google.com>
13 *
14 * For policy-specific per-blkcg data:
15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
16 * Arianna Avanzini <avanzini.arianna@gmail.com>
17 */
18 #include <linux/ioprio.h>
19 #include <linux/kdev_t.h>
20 #include <linux/module.h>
21 #include <linux/sched/signal.h>
22 #include <linux/err.h>
23 #include <linux/blkdev.h>
24 #include <linux/backing-dev.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/wait_bit.h>
28 #include <linux/atomic.h>
29 #include <linux/ctype.h>
30 #include <linux/resume_user_mode.h>
31 #include <linux/psi.h>
32 #include <linux/part_stat.h>
33 #include "blk.h"
34 #include "blk-cgroup.h"
35 #include "blk-ioprio.h"
36 #include "blk-throttle.h"
37
38 static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
39
40 /*
41 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
42 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
43 * policy [un]register operations including cgroup file additions /
44 * removals. Putting cgroup file registration outside blkcg_pol_mutex
45 * allows grabbing it from cgroup callbacks.
46 */
47 static DEFINE_MUTEX(blkcg_pol_register_mutex);
48 static DEFINE_MUTEX(blkcg_pol_mutex);
49
50 struct blkcg blkcg_root;
51 EXPORT_SYMBOL_GPL(blkcg_root);
52
53 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
54 EXPORT_SYMBOL_GPL(blkcg_root_css);
55
56 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
57
58 static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
59
60 bool blkcg_debug_stats = false;
61
62 static DEFINE_RAW_SPINLOCK(blkg_stat_lock);
63
64 #define BLKG_DESTROY_BATCH_SIZE 64
65
66 /*
67 * Lockless lists for tracking IO stats update
68 *
69 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
70 * There are multiple blkg's (one for each block device) attached to each
71 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
72 * but it doesn't know which blkg has the updated stats. If there are many
73 * block devices in a system, the cost of iterating all the blkg's to flush
74 * out the IO stats can be high. To reduce such overhead, a set of percpu
75 * lockless lists (lhead) per blkcg are used to track the set of recently
76 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
77 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
78 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
79 * References to blkg are gotten and then put back in the process to
80 * protect against blkg removal.
81 *
82 * Return: 0 if successful or -ENOMEM if allocation fails.
83 */
init_blkcg_llists(struct blkcg * blkcg)84 static int init_blkcg_llists(struct blkcg *blkcg)
85 {
86 int cpu;
87
88 blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
89 if (!blkcg->lhead)
90 return -ENOMEM;
91
92 for_each_possible_cpu(cpu)
93 init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
94 return 0;
95 }
96
97 /**
98 * blkcg_css - find the current css
99 *
100 * Find the css associated with either the kthread or the current task.
101 * This may return a dying css, so it is up to the caller to use tryget logic
102 * to confirm it is alive and well.
103 */
blkcg_css(void)104 static struct cgroup_subsys_state *blkcg_css(void)
105 {
106 struct cgroup_subsys_state *css;
107
108 css = kthread_blkcg();
109 if (css)
110 return css;
111 return task_css(current, io_cgrp_id);
112 }
113
/*
 * Workqueue function performing the actual, possibly sleeping, teardown of a
 * blkg: per-policy frees, queue-list unlinking and the final kfree().
 */
static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
	 * in order to make sure pd_free_fn() is called in order, the deletion
	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	/* drop the parent blkg ref taken in blkg_create() */
	if (blkg->parent)
		blkg_put(blkg->parent);
	/* unlink from the queue's blkg_list under queue_lock */
	spin_lock_irq(&q->queue_lock);
	list_del_init(&blkg->q_node);
	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}
144
145 /**
146 * blkg_free - free a blkg
147 * @blkg: blkg to free
148 *
149 * Free @blkg which may be partially allocated.
150 */
blkg_free(struct blkcg_gq * blkg)151 static void blkg_free(struct blkcg_gq *blkg)
152 {
153 if (!blkg)
154 return;
155
156 /*
157 * Both ->pd_free_fn() and request queue's release handler may
158 * sleep, so free us by scheduling one work func
159 */
160 INIT_WORK(&blkg->free_work, blkg_free_workfn);
161 schedule_work(&blkg->free_work);
162 }
163
/* RCU callback: flush this blkg's pending stats and drop the refs it held. */
static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
	struct blkcg *blkcg = blkg->blkcg;
	int cpu;

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	/* all punted bios must have been submitted by now */
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
	/*
	 * Flush all the non-empty percpu lockless lists before releasing
	 * us, given these stat belongs to us.
	 *
	 * blkg_stat_lock is for serializing blkg stat update
	 */
	for_each_possible_cpu(cpu)
		__blkcg_rstat_flush(blkcg, cpu);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}
186
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	/* defer the real release until after an RCU grace period */
	call_rcu(&blkg->rcu_head, __blkg_release);
}
201
202 #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
203 static struct workqueue_struct *blkcg_punt_bio_wq;
204
blkg_async_bio_workfn(struct work_struct * work)205 static void blkg_async_bio_workfn(struct work_struct *work)
206 {
207 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
208 async_bio_work);
209 struct bio_list bios = BIO_EMPTY_LIST;
210 struct bio *bio;
211 struct blk_plug plug;
212 bool need_plug = false;
213
214 /* as long as there are pending bios, @blkg can't go away */
215 spin_lock(&blkg->async_bio_lock);
216 bio_list_merge_init(&bios, &blkg->async_bios);
217 spin_unlock(&blkg->async_bio_lock);
218
219 /* start plug only when bio_list contains at least 2 bios */
220 if (bios.head && bios.head->bi_next) {
221 need_plug = true;
222 blk_start_plug(&plug);
223 }
224 while ((bio = bio_list_pop(&bios)))
225 submit_bio(bio);
226 if (need_plug)
227 blk_finish_plug(&plug);
228 }
229
230 /*
231 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
232 * lead to priority inversions as the kthread can be trapped waiting for that
233 * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
234 * a dedicated per-blkcg work item to avoid such priority inversions.
235 */
blkcg_punt_bio_submit(struct bio * bio)236 void blkcg_punt_bio_submit(struct bio *bio)
237 {
238 struct blkcg_gq *blkg = bio->bi_blkg;
239
240 if (blkg->parent) {
241 spin_lock(&blkg->async_bio_lock);
242 bio_list_add(&blkg->async_bios, bio);
243 spin_unlock(&blkg->async_bio_lock);
244 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
245 } else {
246 /* never bounce for the root cgroup */
247 submit_bio(bio);
248 }
249 }
250 EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
251
blkcg_punt_bio_init(void)252 static int __init blkcg_punt_bio_init(void)
253 {
254 blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
255 WQ_MEM_RECLAIM | WQ_FREEZABLE |
256 WQ_UNBOUND | WQ_SYSFS, 0);
257 if (!blkcg_punt_bio_wq)
258 return -ENOMEM;
259 return 0;
260 }
261 subsys_initcall(blkcg_punt_bio_init);
262 #endif /* CONFIG_BLK_CGROUP_PUNT_BIO */
263
264 /**
265 * bio_blkcg_css - return the blkcg CSS associated with a bio
266 * @bio: target bio
267 *
268 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
269 * associated. Callers are expected to either handle %NULL or know association
270 * has been done prior to calling this.
271 */
bio_blkcg_css(struct bio * bio)272 struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
273 {
274 if (!bio || !bio->bi_blkg)
275 return NULL;
276 return &bio->bi_blkg->blkcg->css;
277 }
278 EXPORT_SYMBOL_GPL(bio_blkcg_css);
279
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 * Returns %NULL for the root blkcg, whose css has no parent.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
290
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk. On failure, every
 * partially-acquired resource is released and %NULL is returned.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	/* hold the queue so blkg->q stays valid until blkg_free_workfn() */
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		/* back-pointer used when flushing from the lockless lists */
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}
365
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 * Must be called with @disk->queue->queue_lock held. Returns the new blkg
 * or an ERR_PTR() on failure.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent - callers create blkgs top-down, so it must exist */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}
452
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg. This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* fast path: lockless lookup of an existing blkg */
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	/* re-check under the lock - someone may have raced us */
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents. Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		/* find the highest ancestor that still lacks a blkg */
		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			/* fall back to the closest existing ancestor blkg */
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}
522
/* Unlink @blkg and kill its creation ref; the actual free happens later. */
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * blkg stays on the queue list until blkg_free_workfn(), see details in
	 * blkg_free_workfn(), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
	 * blkg_free_workfn().
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	/* take every still-online policy offline before unlinking */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}
569
/*
 * Destroy all blkgs attached to @disk's queue, in batches so the queue_lock
 * is not held for too long at a stretch.
 */
static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	int count = BLKG_DESTROY_BATCH_SIZE;
	int i;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		/* already destroyed earlier, only waiting to be freed */
		if (hlist_unhashed(&blkg->blkcg_node))
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	/*
	 * Mark policy deactivated since policy offline has been done, and
	 * the free is scheduled, so future blkcg_deactivate_policy() can
	 * be bypassed
	 */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (pol)
			__clear_bit(pol->plid, q->blkcg_pols);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);

	/* wake up anyone waiting on q->root_blkg being cleared */
	wake_up_var(&q->root_blkg);
}
618
blkg_iostat_set(struct blkg_iostat * dst,struct blkg_iostat * src)619 static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
620 {
621 int i;
622
623 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
624 dst->bytes[i] = src->bytes[i];
625 dst->ios[i] = src->ios[i];
626 }
627 }
628
__blkg_clear_stat(struct blkg_iostat_set * bis)629 static void __blkg_clear_stat(struct blkg_iostat_set *bis)
630 {
631 struct blkg_iostat cur = {0};
632 unsigned long flags;
633
634 flags = u64_stats_update_begin_irqsave(&bis->sync);
635 blkg_iostat_set(&bis->cur, &cur);
636 blkg_iostat_set(&bis->last, &cur);
637 u64_stats_update_end_irqrestore(&bis->sync, flags);
638 }
639
blkg_clear_stat(struct blkcg_gq * blkg)640 static void blkg_clear_stat(struct blkcg_gq *blkg)
641 {
642 int cpu;
643
644 for_each_possible_cpu(cpu) {
645 struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
646
647 __blkg_clear_stat(s);
648 }
649 __blkg_clear_stat(&blkg->iostat);
650 }
651
/*
 * Write handler that clears all blkg stats of @css's blkcg (deprecated
 * blkio interface, as the pr_info_once below announces).
 */
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	pr_info_once("blkio.%s is deprecated\n", cftype->name);
	/* blkcg_pol_mutex keeps blkcg_policy[] stable while we walk it */
	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_clear_stat(blkg);
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}
682
blkg_dev_name(struct blkcg_gq * blkg)683 const char *blkg_dev_name(struct blkcg_gq *blkg)
684 {
685 if (!blkg->q->disk)
686 return NULL;
687 return bdi_dev_name(blkg->q->disk->bdi);
688 }
689
690 /**
691 * blkcg_print_blkgs - helper for printing per-blkg data
692 * @sf: seq_file to print to
693 * @blkcg: blkcg of interest
694 * @prfill: fill function to print out a blkg
695 * @pol: policy in question
696 * @data: data to be passed to @prfill
697 * @show_total: to print out sum of prfill return values or not
698 *
699 * This function invokes @prfill on each blkg of @blkcg if pd for the
700 * policy specified by @pol exists. @prfill is invoked with @sf, the
701 * policy data and @data and the matching queue lock held. If @show_total
702 * is %true, the sum of the return values from @prfill is printed with
703 * "Total" label at the end.
704 *
705 * This is to be used to construct print functions for
706 * cftype->read_seq_string method.
707 */
blkcg_print_blkgs(struct seq_file * sf,struct blkcg * blkcg,u64 (* prfill)(struct seq_file *,struct blkg_policy_data *,int),const struct blkcg_policy * pol,int data,bool show_total)708 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
709 u64 (*prfill)(struct seq_file *,
710 struct blkg_policy_data *, int),
711 const struct blkcg_policy *pol, int data,
712 bool show_total)
713 {
714 struct blkcg_gq *blkg;
715 u64 total = 0;
716
717 rcu_read_lock();
718 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
719 spin_lock_irq(&blkg->q->queue_lock);
720 if (blkcg_policy_enabled(blkg->q, pol))
721 total += prfill(sf, blkg->pd[pol->plid], data);
722 spin_unlock_irq(&blkg->q->queue_lock);
723 }
724 rcu_read_unlock();
725
726 if (show_total)
727 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
728 }
729 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
730
731 /**
732 * __blkg_prfill_u64 - prfill helper for a single u64 value
733 * @sf: seq_file to print to
734 * @pd: policy private data of interest
735 * @v: value to print
736 *
737 * Print @v to @sf for the device associated with @pd.
738 */
__blkg_prfill_u64(struct seq_file * sf,struct blkg_policy_data * pd,u64 v)739 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
740 {
741 const char *dname = blkg_dev_name(pd->blkg);
742
743 if (!dname)
744 return 0;
745
746 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
747 return v;
748 }
749 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
750
/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 *
 * All other fields start out zeroed; in particular @ctx->bdev is %NULL,
 * which blkg_conf_open_bdev() uses to detect a not-yet-opened ctx.
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);
765
/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 *
 * On success, returns with @ctx->bdev->bd_queue->rq_qos_mutex held; it is
 * released by blkg_conf_exit().
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	/* already opened by an earlier call - nothing to do */
	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	/* MAJ:MIN must be followed by whitespace, then the config body */
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor), false);
	if (!bdev)
		return -ENODEV;
	/* per-blkg config applies to whole devices only, not partitions */
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	mutex_lock(&bdev->bd_queue->rq_qos_mutex);
	if (!disk_live(bdev->bd_disk)) {
		/*
		 * NOTE(review): the bdev handle is put before its queue's
		 * rq_qos_mutex is unlocked; this relies on the queue staying
		 * alive past the put - confirm this ordering is intentional.
		 */
		blkdev_put_no_open(bdev);
		mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}
/*
 * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
 * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
 *
 * This function returns negative error on failure. On success it returns
 * memflags which must be saved and later passed to blkg_conf_exit_frozen
 * for restoring the memalloc scope.
 */
unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
{
	int ret;
	unsigned long memflags;

	/* the ctx must not have been opened already */
	if (ctx->bdev)
		return -EINVAL;

	ret = blkg_conf_open_bdev(ctx);
	if (ret < 0)
		return ret;
	/*
	 * At this point, we haven’t started protecting anything related to QoS,
	 * so we release q->rq_qos_mutex here, which was first acquired in blkg_
	 * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
	 * the queue to maintain the correct locking order.
	 */
	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);

	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);

	return memflags;
}
848
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/* Prevent concurrent with blkcg_deactivate_policy() */
	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	/* fast path: the blkg already exists */
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		/* find the highest ancestor that still lacks a blkg */
		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_NOIO);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit;
		}

		spin_lock_irq(&q->queue_lock);

		/* re-check: the policy may have been disabled while unlocked */
		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			/* lost the race to create @pos's blkg - discard ours */
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	mutex_unlock(&q->blkcg_mutex);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit:
	mutex_unlock(&q->blkcg_mutex);
	/*
	 * If queue was bypassing, we should retry. Do so after a
	 * short msleep(). It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
971
/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update. This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 *
 * Drops the queue_lock taken by blkg_conf_prep() (when @ctx->blkg is set)
 * and the rq_qos_mutex plus bdev reference taken by blkg_conf_open_bdev()
 * (when @ctx->bdev is set). Safe to call on a ctx that never got past
 * initialization.
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
	__releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
996
997 /*
998 * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
999 * when blkg_conf_open_bdev_frozen is used to open the bdev.
1000 */
blkg_conf_exit_frozen(struct blkg_conf_ctx * ctx,unsigned long memflags)1001 void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
1002 {
1003 if (ctx->bdev) {
1004 struct request_queue *q = ctx->bdev->bd_queue;
1005
1006 blkg_conf_exit(ctx);
1007 blk_mq_unfreeze_queue(q, memflags);
1008 }
1009 }
1010
blkg_iostat_add(struct blkg_iostat * dst,struct blkg_iostat * src)1011 static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
1012 {
1013 int i;
1014
1015 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
1016 dst->bytes[i] += src->bytes[i];
1017 dst->ios[i] += src->ios[i];
1018 }
1019 }
1020
blkg_iostat_sub(struct blkg_iostat * dst,struct blkg_iostat * src)1021 static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
1022 {
1023 int i;
1024
1025 for (i = 0; i < BLKG_IOSTAT_NR; i++) {
1026 dst->bytes[i] -= src->bytes[i];
1027 dst->ios[i] -= src->ios[i];
1028 }
1029 }
1030
/*
 * Fold the difference between @cur and @last into @blkg's aggregated
 * iostat and advance @last to @cur, all under the iostat sync seqlock.
 */
static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
1045
/*
 * Flush the iostat deltas queued on @blkcg's per-@cpu lockless list into
 * each blkg's global counters, and propagate each blkg's delta one level
 * up the hierarchy. Entries for parents may be queued onto the same lists
 * during the walk, to be picked up by a later flush of the parent blkcg.
 */
static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;
	unsigned long flags;

	/* keeps the blkgs on the list alive while we walk them */
	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * For covering concurrent parent blkg update from blkg_release().
	 *
	 * When flushing from cgroup, the subsystem rstat lock is always held,
	 * so this lock won't cause contention most of time.
	 */
	raw_spin_lock_irqsave(&blkg_stat_lock, flags);

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		/*
		 * Order assignment of `next_bisc` from `bisc->lnode.next` in
		 * llist_for_each_entry_safe and clearing `bisc->lqueued` for
		 * avoiding to assign `next_bisc` with new next pointer added
		 * in blk_cgroup_bio_start() in case of re-ordering.
		 *
		 * The pair barrier is implied in llist_add() in blk_cgroup_bio_start().
		 */
		smp_mb();

		WRITE_ONCE(bisc->lqueued, false);
		if (bisc == &blkg->iostat)
			goto propagate_up; /* propagate up to parent only */

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

propagate_up:
		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
			/*
			 * Queue parent->iostat to its blkcg's lockless
			 * list to propagate up to the grandparent if the
			 * iostat hasn't been queued yet.
			 */
			if (!parent->iostat.lqueued) {
				struct llist_head *plhead;

				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
				llist_add(&parent->iostat.lnode, plhead);
				parent->iostat.lqueued = true;
			}
		}
	}
	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
	rcu_read_unlock();
}
1121
blkcg_rstat_flush(struct cgroup_subsys_state * css,int cpu)1122 static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
1123 {
1124 /* Root-level stats are sourced from system-wide IO stats */
1125 if (cgroup_parent(css->cgroup))
1126 __blkcg_rstat_flush(css_to_blkcg(css), cpu);
1127 }
1128
1129 /*
1130 * We source root cgroup stats from the system-wide stats to avoid
1131 * tracking the same information twice and incurring overhead when no
1132 * cgroups are defined. For that reason, css_rstat_flush in
1133 * blkcg_print_stat does not actually fill out the iostat in the root
1134 * cgroup's blkcg_gq.
1135 *
1136 * However, we would like to re-use the printing code between the root and
1137 * non-root cgroups to the extent possible. For that reason, we simulate
1138 * flushing the root cgroup's stats by explicitly filling in the iostat
1139 * with disk level statistics.
1140 */
blkcg_fill_root_iostats(void)1141 static void blkcg_fill_root_iostats(void)
1142 {
1143 struct class_dev_iter iter;
1144 struct device *dev;
1145
1146 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1147 while ((dev = class_dev_iter_next(&iter))) {
1148 struct block_device *bdev = dev_to_bdev(dev);
1149 struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
1150 struct blkg_iostat tmp;
1151 int cpu;
1152 unsigned long flags;
1153
1154 memset(&tmp, 0, sizeof(tmp));
1155 for_each_possible_cpu(cpu) {
1156 struct disk_stats *cpu_dkstats;
1157
1158 cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
1159 tmp.ios[BLKG_IOSTAT_READ] +=
1160 cpu_dkstats->ios[STAT_READ];
1161 tmp.ios[BLKG_IOSTAT_WRITE] +=
1162 cpu_dkstats->ios[STAT_WRITE];
1163 tmp.ios[BLKG_IOSTAT_DISCARD] +=
1164 cpu_dkstats->ios[STAT_DISCARD];
1165 // convert sectors to bytes
1166 tmp.bytes[BLKG_IOSTAT_READ] +=
1167 cpu_dkstats->sectors[STAT_READ] << 9;
1168 tmp.bytes[BLKG_IOSTAT_WRITE] +=
1169 cpu_dkstats->sectors[STAT_WRITE] << 9;
1170 tmp.bytes[BLKG_IOSTAT_DISCARD] +=
1171 cpu_dkstats->sectors[STAT_DISCARD] << 9;
1172 }
1173
1174 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
1175 blkg_iostat_set(&blkg->iostat.cur, &tmp);
1176 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
1177 }
1178 class_dev_iter_exit(&iter);
1179 }
1180
/* Emit one "MAJ:MIN rbytes=... wbytes=..." stat line for @blkg into @s. */
static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	/* snapshot all six counters consistently via the u64_stats seqcount */
	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	/*
	 * NOTE(review): the guard only tests the read/write counters, so a
	 * blkg with discard-only activity prints no byte/io fields at all.
	 * Looks historical rather than deliberate -- confirm before changing
	 * since the output format is user-visible ABI.
	 */
	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios,
			dbytes, dios);
	}

	/* optional debug fields, only when delay throttling is active */
	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			atomic64_read(&blkg->delay_nsec));
	}

	/* let each active policy append its own per-blkg stats */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}
1232
/*
 * seq_show handler for the cgroup "io.stat" file: make the stats current
 * (root from system-wide disk stats, others via rstat flush), then print
 * one line per blkg under the blkcg.
 */
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		css_rstat_flush(&blkcg->css);

	/* RCU keeps the list stable; queue_lock stabilizes each blkg */
	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}
1252
/* interface files on the default (cgroup v2) hierarchy */
static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};
1260
/* interface files on the legacy (cgroup v1 "blkio") hierarchy */
static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};
1268
1269 #ifdef CONFIG_CGROUP_WRITEBACK
blkcg_get_cgwb_list(struct cgroup_subsys_state * css)1270 struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
1271 {
1272 return &css_to_blkcg(css)->cgwb_list;
1273 }
1274 #endif
1275
1276 /*
1277 * blkcg destruction is a three-stage process.
1278 *
1279 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
1280 * which offlines writeback. Here we tie the next stage of blkg destruction
1281 * to the completion of writeback associated with the blkcg. This lets us
1282 * avoid punting potentially large amounts of outstanding writeback to root
1283 * while maintaining any ongoing policies. The next stage is triggered when
1284 * the nr_cgwbs count goes to zero.
1285 *
1286 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1287 * and handles the destruction of blkgs. Here the css reference held by
1288 * the blkg is put back eventually allowing blkcg_css_free() to be called.
1289 * This work may occur in cgwb_release_workfn() on the cgwb_release
1290 * workqueue. Any submitted ios that fail to get the blkg ref will be
1291 * punted to the root_blkg.
1292 *
1293 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1294 * This finally frees the blkcg.
1295 */
1296
/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	/* pop blkgs off the head until the list is empty */
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		/*
		 * trylock avoids inverting the q-lock-outside-blkcg-lock
		 * ordering; on failure (or if we've run long) drop
		 * everything, breathe, and retry.
		 */
		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}
1337
1338 /**
1339 * blkcg_pin_online - pin online state
1340 * @blkcg_css: blkcg of interest
1341 *
1342 * While pinned, a blkcg is kept online. This is primarily used to
1343 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
1344 * while an associated cgwb is still active.
1345 */
blkcg_pin_online(struct cgroup_subsys_state * blkcg_css)1346 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
1347 {
1348 refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
1349 }
1350
1351 /**
1352 * blkcg_unpin_online - unpin online state
1353 * @blkcg_css: blkcg of interest
1354 *
1355 * This is primarily used to impedance-match blkg and cgwb lifetimes so
1356 * that blkg doesn't go offline while an associated cgwb is still active.
1357 * When this count goes to zero, all active cgwbs have finished so the
1358 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
1359 */
blkcg_unpin_online(struct cgroup_subsys_state * blkcg_css)1360 void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
1361 {
1362 struct blkcg *blkcg = css_to_blkcg(blkcg_css);
1363
1364 do {
1365 struct blkcg *parent;
1366
1367 if (!refcount_dec_and_test(&blkcg->online_pin))
1368 break;
1369
1370 parent = blkcg_parent(blkcg);
1371 blkcg_destroy_blkgs(blkcg);
1372 blkcg = parent;
1373 } while (blkcg);
1374 }
1375
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * Invoked when @css is about to go away. Writeback is taken offline first;
 * only once all writeback associated with the blkcg has drained does blkg
 * destruction (stage 2 of the teardown described above) begin.
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* stop new attachments/migrations and offline the cgwbs */
	wb_blkcg_offline(css);

	/* drop the base pin; stage 2 runs once all cgwb pins are gone too */
	blkcg_unpin_online(css);
}
1392
blkcg_css_free(struct cgroup_subsys_state * css)1393 static void blkcg_css_free(struct cgroup_subsys_state *css)
1394 {
1395 struct blkcg *blkcg = css_to_blkcg(css);
1396 int i;
1397
1398 mutex_lock(&blkcg_pol_mutex);
1399
1400 list_del(&blkcg->all_blkcgs_node);
1401
1402 for (i = 0; i < BLKCG_MAX_POLS; i++)
1403 if (blkcg->cpd[i])
1404 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1405
1406 mutex_unlock(&blkcg_pol_mutex);
1407
1408 free_percpu(blkcg->lhead);
1409 kfree(blkcg);
1410 }
1411
/*
 * css_alloc callback: allocate and initialize a blkcg. The root cgroup
 * reuses the statically allocated blkcg_root. Per-policy cgroup data (cpd)
 * is allocated for every registered policy that wants it; on any failure
 * everything allocated so far is unwound via the goto ladder.
 */
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	/* serializes against policy [de]registration and all_blkcgs */
	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc_obj(*blkcg);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);	/* base pin, see css_offline */
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	/* free only the cpds allocated before the failing iteration */
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}
1477
blkcg_css_online(struct cgroup_subsys_state * css)1478 static int blkcg_css_online(struct cgroup_subsys_state *css)
1479 {
1480 struct blkcg *parent = blkcg_parent(css_to_blkcg(css));
1481
1482 /*
1483 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1484 * don't go offline while cgwbs are still active on them. Pin the
1485 * parent so that offline always happens towards the root.
1486 */
1487 if (parent)
1488 blkcg_pin_online(&parent->css);
1489 return 0;
1490 }
1491
blkg_init_queue(struct request_queue * q)1492 void blkg_init_queue(struct request_queue *q)
1493 {
1494 INIT_LIST_HEAD(&q->blkg_list);
1495 mutex_init(&q->blkcg_mutex);
1496 }
1497
/*
 * Set up blkcg state for @disk: create the root blkg for its queue.
 * Returns 0 on success, -errno on failure.
 */
int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;

	/*
	 * If the queue is shared across disk rebind (e.g., SCSI), the
	 * previous disk's blkcg state is cleaned up asynchronously via
	 * disk_release() -> blkcg_exit_disk(). Wait for that cleanup to
	 * finish (indicated by root_blkg becoming NULL) before setting up
	 * new blkcg state. Otherwise, we may overwrite q->root_blkg while
	 * the old one is still alive, and radix_tree_insert() in
	 * blkg_create() will fail with -EEXIST because the old entries
	 * still occupy the same queue id slot in blkcg->blkg_tree.
	 */
	wait_var_event(&q->root_blkg, !READ_ONCE(q->root_blkg));

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	/* preload so the radix insert under the spinlock can't fail on OOM */
	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	return 0;

err_unlock:
	/* blkg_create() consumed new_blkg even on failure; nothing to free */
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}
1542
/*
 * Tear down blkcg state for @disk: destroy all of its blkgs first, then
 * shut down the throttling machinery.
 */
void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}
1548
blkcg_exit(struct task_struct * tsk)1549 static void blkcg_exit(struct task_struct *tsk)
1550 {
1551 if (tsk->throttle_disk)
1552 put_disk(tsk->throttle_disk);
1553 tsk->throttle_disk = NULL;
1554 }
1555
/*
 * The block IO cgroup subsystem: "io" on the default hierarchy, "blkio"
 * on the legacy one.
 */
struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1576
/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk. Requires %GFP_KERNEL context. @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	unsigned int memflags;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/*
	 * Policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
	 * for example, ioprio. Such policy will work on blkcg level, not disk
	 * level, and don't need to be activated.
	 */
	if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
		return -EINVAL;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);
retry:
	/*
	 * Allocation can't block under queue_lock, so the loop first tries
	 * GFP_NOWAIT; on failure it pins the blkg, drops the lock, does a
	 * GFP_KERNEL prealloc and restarts from the top. Already-populated
	 * blkgs are skipped on re-walk via the blkg->pd[] check below.
	 */
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to initialize parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed. Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		/* install and bring the pd online under the blkcg lock */
		spin_lock(&blkg->blkcg->lock);

		pd->blkg = blkg;
		pd->plid = pol->plid;
		blkg->pd[pol->plid] = pd;

		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);

		if (pol->pd_online_fn)
			pol->pd_online_fn(pd);
		pd->online = true;

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, take down everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;
		struct blkg_policy_data *pd;

		spin_lock(&blkcg->lock);
		pd = blkg->pd[pol->plid];
		if (pd) {
			if (pd->online && pol->pd_offline_fn)
				pol->pd_offline_fn(pd);
			pd->online = false;
			pol->pd_free_fn(pd);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1707
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk. Follows the same synchronization rules as
 * blkcg_activate_policy(). Frees the per-blkg policy data of @pol for
 * every blkg on the queue after offlining it.
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned int memflags;

	if (!blkcg_policy_enabled(q, pol))
		return;

	/* keep IO out while the pds are being torn down */
	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	/* mark disabled first so blkcg_policy_enabled() callers see it */
	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1754
blkcg_free_all_cpd(struct blkcg_policy * pol)1755 static void blkcg_free_all_cpd(struct blkcg_policy *pol)
1756 {
1757 struct blkcg *blkcg;
1758
1759 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1760 if (blkcg->cpd[pol->plid]) {
1761 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1762 blkcg->cpd[pol->plid] = NULL;
1763 }
1764 }
1765 }
1766
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 *
 * Locking: blkcg_pol_register_mutex covers the whole operation including
 * cgroup file additions; blkcg_pol_mutex covers blkcg_policy[] and the
 * all_blkcgs walk, and is dropped before adding cftypes so cgroup
 * callbacks can take it.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	/*
	 * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
	 * without pd_alloc_fn/pd_free_fn can't be activated.
	 */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		return -EINVAL;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd) {
				ret = -ENOMEM;
				goto err_free_cpds;
			}

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes == pol->legacy_cftypes) {
		WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
					   pol->dfl_cftypes));
	} else {
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	}
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
1847
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep. Interface files are
 * removed before the cpds are freed so userspace can't race against a
 * policy whose per-cgroup data is already gone.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1880
/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay. We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second. The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window. We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled. We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward. blkg->last_use keeps track of
	 * the use_delay counter. The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
		/* the cmpxchg winner claims this window; losers bail above */
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway. Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
1937
1938 /*
1939 * This is called when we want to actually walk up the hierarchy and check to
1940 * see if we need to throttle, and then actually throttle if there is some
1941 * accumulated delay. This should only be called upon return to user space so
1942 * we're not holding some lock that would induce a priority inversion.
1943 */
blkcg_maybe_throttle_blkg(struct blkcg_gq * blkg,bool use_memdelay)1944 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1945 {
1946 unsigned long pflags;
1947 bool clamp;
1948 u64 now = blk_time_get_ns();
1949 u64 exp;
1950 u64 delay_nsec = 0;
1951 int tok;
1952
1953 while (blkg->parent) {
1954 int use_delay = atomic_read(&blkg->use_delay);
1955
1956 if (use_delay) {
1957 u64 this_delay;
1958
1959 blkcg_scale_delay(blkg, now);
1960 this_delay = atomic64_read(&blkg->delay_nsec);
1961 if (this_delay > delay_nsec) {
1962 delay_nsec = this_delay;
1963 clamp = use_delay > 0;
1964 }
1965 }
1966 blkg = blkg->parent;
1967 }
1968
1969 if (!delay_nsec)
1970 return;
1971
1972 /*
1973 * Let's not sleep for all eternity if we've amassed a huge delay.
1974 * Swapping or metadata IO can accumulate 10's of seconds worth of
1975 * delay, and we want userspace to be able to do _something_ so cap the
1976 * delays at 0.25s. If there's 10's of seconds worth of delay then the
1977 * tasks will be delayed for 0.25 second for every syscall. If
1978 * blkcg_set_delay() was used as indicated by negative use_delay, the
1979 * caller is responsible for regulating the range.
1980 */
1981 if (clamp)
1982 delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1983
1984 if (use_memdelay)
1985 psi_memstall_enter(&pflags);
1986
1987 exp = ktime_add_ns(now, delay_nsec);
1988 tok = io_schedule_prepare();
1989 do {
1990 __set_current_state(TASK_KILLABLE);
1991 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1992 break;
1993 } while (!fatal_signal_pending(current));
1994 io_schedule_finish(tok);
1995
1996 if (use_memdelay)
1997 psi_memstall_leave(&pflags);
1998 }
1999
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume(). Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything. This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct gendisk *disk = current->throttle_disk;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!disk)
		return;

	/* consume the one-shot throttle request set by blkcg_schedule_throttle() */
	current->throttle_disk = NULL;
	current->use_memdelay = false;

	/*
	 * Look up our blkg under RCU and take a proper reference before
	 * dropping the read lock; if any lookup step fails, just drop the
	 * disk reference and return without throttling.
	 */
	rcu_read_lock();
	blkcg = css_to_blkcg(blkcg_css());
	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, disk->queue);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	put_disk(disk);
	return;
out:
	rcu_read_unlock();
	put_disk(disk);
}
2042
2043 /**
2044 * blkcg_schedule_throttle - this task needs to check for throttling
2045 * @disk: disk to throttle
2046 * @use_memdelay: do we charge this to memory delay for PSI
2047 *
2048 * This is called by the IO controller when we know there's delay accumulated
2049 * for the blkg for this task. We do not pass the blkg because there are places
2050 * we call this that may not have that information, the swapping code for
 * instance will only have a block_device at that point. This sets the
2052 * notify_resume for the task to check and see if it requires throttling before
2053 * returning to user space.
2054 *
2055 * We will only schedule once per syscall. You can call this over and over
2056 * again and it will only do the check once upon return to user space, and only
2057 * throttle once. If the task needs to be throttled again it'll need to be
2058 * re-set at the next time we see the task.
2059 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
	struct task_struct *tsk = current;

	/* Kernel threads never return to userspace, so there is no resume hook. */
	if (unlikely(tsk->flags & PF_KTHREAD))
		return;

	if (tsk->throttle_disk != disk) {
		/* Refuse to pin a disk that is already being torn down. */
		if (test_bit(GD_DEAD, &disk->state))
			return;
		get_device(disk_to_dev(disk));

		/* Swap out any previously armed disk reference. */
		if (tsk->throttle_disk)
			put_disk(tsk->throttle_disk);
		tsk->throttle_disk = disk;
	}

	/* Only ever latch use_memdelay on; a false argument leaves it alone. */
	if (use_memdelay)
		tsk->use_memdelay = use_memdelay;
	set_notify_resume(tsk);
}
2079
2080 /**
2081 * blkcg_add_delay - add delay to this blkg
2082 * @blkg: blkg of interest
2083 * @now: the current time in nanoseconds
2084 * @delta: how many nanoseconds of delay to add
2085 *
2086 * Charge @delta to the blkg's current delay accumulation. This is used to
2087 * throttle tasks if an IO controller thinks we need more throttling.
2088 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	/* Negative use_delay means blkcg_set_delay() owns the budget; don't mix. */
	bool externally_managed = atomic_read(&blkg->use_delay) < 0;

	if (WARN_ON_ONCE(externally_managed))
		return;

	/* Decay the accumulated delay first, then charge the new debt. */
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
2096
2097 /**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
2099 * @bio: target bio
2100 * @css: target css
2101 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
2103 * blkg->parent pointers are always valid. This returns the blkg that it ended
2104 * up taking a reference on or %NULL if no reference was taken.
2105 */
blkg_tryget_closest(struct bio * bio,struct cgroup_subsys_state * css)2106 static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
2107 struct cgroup_subsys_state *css)
2108 {
2109 struct blkcg_gq *blkg, *ret_blkg = NULL;
2110
2111 rcu_read_lock();
2112 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
2113 while (blkg) {
2114 if (blkg_tryget(blkg)) {
2115 ret_blkg = blkg;
2116 break;
2117 }
2118 blkg = blkg->parent;
2119 }
2120 rcu_read_unlock();
2121
2122 return ret_blkg;
2123 }
2124
2125 /**
2126 * bio_associate_blkg_from_css - associate a bio with a specified css
2127 * @bio: target bio
2128 * @css: target css
2129 *
2130 * Associate @bio with the blkg found by combining the css's blkg and the
2131 * request_queue of the @bio. An association failure is handled by walking up
2132 * the blkg tree. Therefore, the blkg associated can be anything between @blkg
 * the blkg found from @css and q->root_blkg. This situation only happens when a cgroup is dying and
2134 * then the remaining bios will spill to the closest alive blkg.
2135 *
2136 * A reference will be taken on the blkg and will be released when @bio is
2137 * freed.
2138 */
bio_associate_blkg_from_css(struct bio * bio,struct cgroup_subsys_state * css)2139 void bio_associate_blkg_from_css(struct bio *bio,
2140 struct cgroup_subsys_state *css)
2141 {
2142 if (bio->bi_blkg)
2143 blkg_put(bio->bi_blkg);
2144
2145 if (css && css->parent) {
2146 bio->bi_blkg = blkg_tryget_closest(bio, css);
2147 } else {
2148 blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
2149 bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
2150 }
2151 }
2152 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2153
2154 /**
2155 * bio_associate_blkg - associate a bio with a blkg
2156 * @bio: target bio
2157 *
2158 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
2160 * already associated, the css is reused and association redone as the
2161 * request_queue may have changed.
2162 */
bio_associate_blkg(struct bio * bio)2163 void bio_associate_blkg(struct bio *bio)
2164 {
2165 struct cgroup_subsys_state *css;
2166
2167 if (blk_op_is_passthrough(bio->bi_opf))
2168 return;
2169
2170 rcu_read_lock();
2171
2172 if (bio->bi_blkg)
2173 css = bio_blkcg_css(bio);
2174 else
2175 css = blkcg_css();
2176
2177 bio_associate_blkg_from_css(bio, css);
2178
2179 rcu_read_unlock();
2180 }
2181 EXPORT_SYMBOL_GPL(bio_associate_blkg);
2182
2183 /**
2184 * bio_clone_blkg_association - clone blkg association from src to dst bio
2185 * @dst: destination bio
2186 * @src: source bio
2187 */
bio_clone_blkg_association(struct bio * dst,struct bio * src)2188 void bio_clone_blkg_association(struct bio *dst, struct bio *src)
2189 {
2190 if (src->bi_blkg)
2191 bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
2192 }
2193 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
2194
blk_cgroup_io_type(struct bio * bio)2195 static int blk_cgroup_io_type(struct bio *bio)
2196 {
2197 if (op_is_discard(bio->bi_opf))
2198 return BLKG_IOSTAT_DISCARD;
2199 if (op_is_write(bio->bi_opf))
2200 return BLKG_IOSTAT_WRITE;
2201 return BLKG_IOSTAT_READ;
2202 }
2203
/*
 * Account @bio to its blkg's per-cpu iostat buffer and mark the buffer as
 * having pending updates for the rstat flush machinery.  Only active on the
 * cgroup2 (default) hierarchy; root-level stats come from system-wide IO
 * accounting instead.
 */
void blk_cgroup_bio_start(struct bio *bio)
{
	struct blkcg *blkcg = bio->bi_blkg->blkcg;
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
		return;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(blkcg->css.cgroup))
		return;

	/* Pin this CPU so the per-cpu buffer and llist stay consistent. */
	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	/*
	 * If the iostat_cpu isn't in a lockless list, put it into the
	 * list to indicate that a stat update is pending.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
	}

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	/* Tell the rstat core this CPU has pending stats for @blkcg. */
	css_rstat_updated(&blkcg->css, cpu);
	put_cpu();
}
2247
blk_cgroup_congested(void)2248 bool blk_cgroup_congested(void)
2249 {
2250 struct blkcg *blkcg;
2251 bool ret = false;
2252
2253 rcu_read_lock();
2254 for (blkcg = css_to_blkcg(blkcg_css()); blkcg;
2255 blkcg = blkcg_parent(blkcg)) {
2256 if (atomic_read(&blkcg->congestion_count)) {
2257 ret = true;
2258 break;
2259 }
2260 }
2261 rcu_read_unlock();
2262 return ret;
2263 }
2264
/* Expose blkcg_debug_stats (declared near the top of this file) as a
 * runtime-writable module parameter. */
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
2267