#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum stat_type {
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,

	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Types lower than this live in stat_arr and have subtypes */
#define BLKIO_STAT_ARR_NR	(BLKIO_STAT_QUEUED + 1)

/* Per cpu stats */
enum stat_type_cpu {
	BLKIO_STAT_CPU_SECTORS,
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,
	BLKIO_STAT_CPU_NR
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};
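
/*
 * Usage sketch (illustrative, not part of the original interface): stat types
 * below BLKIO_STAT_ARR_NR are broken down by the sub-types above and live in
 * the stat_arr[][] array of struct blkio_group_stats, while BLKIO_STAT_TIME
 * and the debug-only types are single valued fields.  Assuming a pointer
 * "stats" to a struct blkio_group_stats:
 *
 *	uint64_t queued_reads = stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ];
 *	uint64_t queued_total = stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_TOTAL];
 *	uint64_t disk_time    = stats->time;
 */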

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
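
/*
 * Illustrative note (not part of the original interface): these enum values
 * back the per-cgroup control files, e.g. BLKIO_PROP_weight corresponds to
 * the "blkio.weight" file and BLKIO_THROTL_read_bps_device to
 * "blkio.throttle.read_bps_device" in the cgroup filesystem.
 */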

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkio_group_stats {
	struct u64_stats_sync syncp;
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t stat_arr[BLKIO_STAT_ARR_NR][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;

	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;

	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;

	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

#ifdef CONFIG_DEBUG_BLK_CGROUP
#define BLKG_STATS_DEBUG_CLEAR_START	\
	offsetof(struct blkio_group_stats, unaccounted_time)
#define BLKG_STATS_DEBUG_CLEAR_SIZE	\
	(offsetof(struct blkio_group_stats, start_group_wait_time) - \
	 BLKG_STATS_DEBUG_CLEAR_START)
#endif

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	uint64_t sectors;
	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
	struct u64_stats_sync syncp;
};

struct blkio_group_conf {
	unsigned int weight;
	unsigned int iops[2];
	u64 bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count */
	int refcnt;

	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;
};
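
/*
 * Layout sketch (illustrative, not part of the original interface): each
 * blkio_group carries one blkg_policy_data per registered policy, and a
 * policy's private object lives in the trailing pdata[] flexible array:
 *
 *	blkg->pd[BLKIO_POLICY_PROP]->pdata	pol->pdata_size bytes owned by
 *						the proportional weight policy
 *	blkg->pd[BLKIO_POLICY_THROTL]->pdata	private data of the throttling
 *						policy
 */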

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
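
/*
 * Usage sketch (hypothetical policy code, not part of the original
 * interface): a policy keeps its private state in the pdata area and can map
 * between blkg and pdata in both directions; "my_pd" and "my_policy_type"
 * are made-up names:
 *
 *	struct my_pd *pd = blkg_to_pdata(blkg, &my_policy_type);
 *	struct blkio_group *same_blkg = pdata_to_blkg(pd);
 */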

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif
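
/*
 * Usage sketch (hypothetical caller, not part of the original interface):
 * with CONFIG_BLK_CGROUP enabled, a holder of an existing reference takes and
 * drops further blkg references only while holding the queue_lock of blkg->q:
 *
 *	spin_lock_irq(blkg->q->queue_lock);
 *	blkg_get(blkg);		(takes an extra reference)
 *	...
 *	blkg_put(blkg);		(__blkg_release() runs on the final put)
 *	spin_unlock_irq(blkg->q->queue_lock);
 */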

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
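
/*
 * Usage sketch (hypothetical, not part of the original interface): a policy
 * typically looks up or creates its blkg under the queue lock and then feeds
 * the accounting helpers declared below from its dispatch and completion
 * paths; "my_policy_type", "nr_bytes", "is_write" and "is_sync" are made-up
 * names:
 *
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	blkiocg_update_dispatch_stats(blkg, &my_policy_type, nr_bytes,
 *				      is_write, is_sync);
 */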
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif
#endif	/* _BLK_CGROUP_H */