#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
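/*
 * Illustrative sketch, not part of this header: a policy's per-blkg
 * data embeds struct blkg_policy_data as its first member (so the
 * blkg_to_pd()/pd_to_blkg() conversions below work) and advertises the
 * size of the wrapping structure via pd_size.  plid is filled in by
 * blkcg_policy_register().  All "foo" names are hypothetical.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;
 *		unsigned int		weight;
 *		struct blkg_rwstat	served;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 */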

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

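/*
 * Rough usage sketch for the conf helpers above (hypothetical "foo"
 * policy, error handling abbreviated): a cftype write handler hands the
 * user-supplied "MAJ:MIN VAL" string to blkg_conf_prep(), which resolves
 * the device, looks up or creates the blkg and leaves it in @ctx; the
 * handler then updates its per-blkg data and drops the references and
 * locks taken by prep via blkg_conf_finish().
 *
 *	struct blkg_conf_ctx ctx;
 *	struct blkg_policy_data *pd;
 *	struct foo_grp *foo;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	pd = blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
 *	foo = container_of(pd, struct foo_grp, pd);
 *	foo->weight = ctx.v;
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */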
static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	struct cgroup *pcg = blkcg->css.cgroup->parent;

	return pcg ? cgroup_to_blkcg(pcg) : NULL;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

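/*
 * Rough lifetime sketch for the request_list helpers above, roughly
 * mirroring how the block core's request allocation and freeing paths
 * are expected to use them (angle brackets mark elided steps):
 *
 *	rl = blk_get_rl(q, bio);
 *	rq = <allocate a request from rl>;
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	rl = blk_rq_rl(rq);
 *	<return rq to rl>;
 *	blk_put_rl(rl);
 *
 * blk_get_rl() and blk_put_rl() are called under queue_lock.
 */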
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

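/*
 * Rough usage sketch (hypothetical "foo" policy with a struct
 * blkg_rwstat "served" member in its per-blkg data): the counter is
 * bumped in the policy's fast path and dumped through
 * blkcg_print_blkgs() with blkg_prfill_rwstat(), passing the counter's
 * offset inside the private data as @data.  The offsetof() trick
 * assumes struct blkg_policy_data is the first member of foo_grp.
 *
 *	blkg_rwstat_add(&foo->served, bio->bi_rw, bio->bi_size);
 *
 *	static int foo_print_served(struct cgroup *cgrp, struct cftype *cft,
 *				    struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
 *				  blkg_prfill_rwstat, &blkcg_policy_foo,
 *				  offsetof(struct foo_grp, served), true);
 *		return 0;
 *	}
 */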
/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */