#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than sizeof(struct
 * blkg_policy_data).
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

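/*
 * Example: a minimal sketch of a policy's per-blkg private data; struct
 * my_policy_pd, my_policy, MY_WEIGHT_DEFAULT and my_pd_init() are
 * hypothetical names, not part of this interface.
 *
 *	struct my_policy_pd {
 *		struct blkg_policy_data	pd;	// must come first
 *		unsigned int		weight;
 *	};
 *
 *	// registered as blkcg_policy->pd_init_fn and called by blkcg
 *	// core after it has allocated pd_size bytes for this blkg
 *	static void my_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct my_policy_pd *mpd = container_of(
 *			blkg_to_pd(blkg, &my_policy),	// see below
 *			struct my_policy_pd, pd);
 *
 *		mpd->weight = MY_WEIGHT_DEFAULT;
 *	}
 */
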
/*
 * Policies that need to keep per-blkcg data which is independent of
 * any request_queue associated with the blkcg must specify the size
 * of that data with the cpd_size field of the blkcg_policy structure
 * and embed a blkcg_policy_data at its beginning.  blkcg core
 * allocates policy-specific per-blkcg structures lazily, the first
 * time they are actually needed, and handles them together with
 * blkgs.  cpd_init_fn() is invoked to let each policy initialize its
 * per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

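/*
 * Example: per-blkcg private data follows the same embedding rule.  A
 * minimal sketch; struct my_policy_cpd and my_cpd_init() are
 * hypothetical.
 *
 *	struct my_policy_cpd {
 *		struct blkcg_policy_data cpd;	// must come first
 *		unsigned int		 dfl_weight;
 *	};
 *
 *	// registered as blkcg_policy->cpd_init_fn
 *	static void my_cpd_init(const struct blkcg *blkcg)
 *	{
 *		struct my_policy_cpd *cpd = container_of(
 *			blkcg->pd[my_policy.plid],
 *			struct my_policy_cpd, cpd);
 *
 *		cpd->dfl_weight = MY_WEIGHT_DEFAULT;
 *	}
 */
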
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

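/*
 * Example: a hypothetical policy tying the above together.  A minimal
 * sketch; my_policy and the my_* callbacks are not part of this
 * interface, and plid is assigned by blkcg_policy_register().
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_size	= sizeof(struct my_policy_pd),
 *		.cpd_size	= sizeof(struct my_policy_cpd),
 *		.cftypes	= my_policy_files,
 *
 *		.cpd_init_fn	= my_cpd_init,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_exit_fn	= my_pd_exit,
 *	};
 */
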
extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

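/*
 * Example: registration happens once, typically at module init, and
 * assigns the policy its plid; activation is per request_queue and
 * allocates the policy's pd for every blkg on that queue.  A minimal
 * sketch around the hypothetical my_policy above.
 *
 *	static int __init my_policy_module_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 *	// later, e.g. while attaching an elevator that uses the
 *	// policy to a queue
 *	ret = blkcg_activate_policy(q, &my_policy);
 *	if (ret)
 *		return ret;
 */
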
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

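/*
 * Example: emitting one value per blkg from a cftype seq_file handler.
 * A minimal sketch; my_prfill_weight(), my_print_weights() and the
 * weight field are hypothetical.
 *
 *	static u64 my_prfill_weight(struct seq_file *sf,
 *				    struct blkg_policy_data *pd, int off)
 *	{
 *		struct my_policy_pd *mpd =
 *			container_of(pd, struct my_policy_pd, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, mpd->weight);
 *	}
 *
 *	static int my_print_weights(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  my_prfill_weight, &my_policy, 0, false);
 *		return 0;
 *	}
 */
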
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

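/*
 * Example: a cftype write handler parsing "MAJ:MIN VAL" input with the
 * helpers above.  A minimal sketch; my_set_weight() and the weight
 * field are hypothetical.
 *
 *	static ssize_t my_set_weight(struct kernfs_open_file *of,
 *				     char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		struct my_policy_pd *mpd;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// queue lock is held between prep and finish
 *		mpd = container_of(blkg_to_pd(ctx.blkg, &my_policy),
 *				   struct my_policy_pd, pd);
 *		mpd->weight = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */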

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

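/*
 * Example: summing a per-blkg counter over @blkg's subtree.  A minimal
 * sketch; must run under rcu_read_lock(), and my_pd_count() is
 * hypothetical.
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		sum += my_pd_count(blkg_to_pd(pos, &my_policy));
 *	rcu_read_unlock();
 */
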
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

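/*
 * Example: how the request allocation path pairs these helpers.  A
 * minimal sketch of the pattern used by blk-core, not a drop-in
 * implementation.
 *
 *	// while allocating, under queue_lock
 *	rl = blk_get_rl(q, bio);	// takes a blkg ref for non-root
 *	...
 *	blk_rq_set_rl(rq, rl);		// remember where @rq came from
 *
 *	// while freeing the request, under queue_lock
 *	rl = blk_rq_rl(rq);
 *	...
 *	blk_put_rl(rl);			// drops the blk_get_rl() ref
 */
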
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: loop cursor (struct request_list *)
 * @q: request_queue to iterate the request_lists of
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

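/*
 * Example: keeping a blkg_stat in policy private data.  A minimal
 * sketch extending the hypothetical struct my_policy_pd above with a
 * "struct blkg_stat serviced;" member; blkg_stat_add() callers must
 * already be serialized, e.g. by the queue lock.
 *
 *	blkg_stat_init(&mpd->serviced);			// from pd_init_fn
 *	blkg_stat_add(&mpd->serviced, 1);		// per serviced rq
 *	total = blkg_stat_read(&mpd->serviced);		// lockless read
 */
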
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return a snapshot of the current values of @rwstat.  This function can
 * be called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

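/*
 * Example: accounting a bio in a blkg_rwstat and reading it back.  A
 * minimal sketch; "stat" would live in policy private data and the
 * add-side call sites must be serialized as described above.
 *
 *	blkg_rwstat_add(&stat, bio->bi_rw, bio->bi_iter.bi_size);
 *
 *	// later, without any locking
 *	struct blkg_rwstat snap = blkg_rwstat_read(&stat);
 *	pr_debug("read %llu write %llu total %llu\n",
 *		 snap.cnt[BLKG_RWSTAT_READ], snap.cnt[BLKG_RWSTAT_WRITE],
 *		 blkg_rwstat_total(&stat));
 */
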
#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */