// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*show_limit)(struct gendisk *disk, char *page);

	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
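
/*
 * Per the usual sysfs convention, a store handler returns the number of
 * bytes consumed on success and a negative errno on failure, which is why
 * queue_var_store() returns @count rather than zero.
 */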

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->elevator_lock);
	ret = queue_var_show(disk->queue->nr_requests, page);
	mutex_unlock(&disk->queue->elevator_lock);
	return ret;
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		ret = err;
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}
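
/*
 * For example (the device name is illustrative), to raise the soft limit
 * on queued requests from user space:
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are silently clamped rather than rejected.
 */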

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->limits_lock);
	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
	mutex_unlock(&disk->queue->limits_lock);

	return ret;
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	/*
	 * ->ra_pages is protected by ->limits_lock because it is usually
	 * calculated from the queue limits by queue_limits_commit_update.
	 */
	mutex_lock(&q->limits_lock);
	memflags = blk_mq_freeze_queue(q);
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	mutex_unlock(&q->limits_lock);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
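
/*
 * The shifts above convert between pages and KiB: with 4 KiB pages,
 * PAGE_SHIFT - 10 == 2, so ra_pages << 2 yields KiB and ra_kb >> 2 yields
 * pages. A read_ahead_kb value that is not a multiple of the page size is
 * therefore rounded down. Illustrative usage:
 *
 *	echo 1024 > /sys/block/sda/queue/read_ahead_kb
 */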

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_wzeroes_unmap_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_wzeroes_unmap_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
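
/*
 * For illustration, QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
 * expands to:
 *
 *	static ssize_t queue_max_discard_sectors_show(struct gendisk *disk,
 *						      char *page)
 *	{
 *		return sysfs_emit(page, "%llu\n",
 *			(unsigned long long)disk->queue->limits.max_discard_sectors <<
 *				SECTOR_SHIFT);
 *	}
 *
 * i.e. the 512-byte sector count is reported to user space in bytes.
 */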

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
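
/*
 * The >> 1 above converts 512-byte sectors to KiB (two sectors per KiB),
 * matching the _kb suffix of the corresponding sysfs attributes.
 */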

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}
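
/*
 * discard_max_bytes is written in bytes and must be aligned to the
 * device's discard_granularity (the mask test above relies on the
 * granularity being a power of two). Illustrative usage:
 *
 *	echo 134217728 > /sys/block/sda/queue/discard_max_bytes
 */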

static int queue_max_wzeroes_unmap_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_zeroes_bytes, max_hw_zeroes_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_zeroes_bytes, page, count);
	if (ret < 0)
		return ret;

	max_hw_zeroes_bytes = lim->max_hw_wzeroes_unmap_sectors << SECTOR_SHIFT;
	if (max_zeroes_bytes != 0 && max_zeroes_bytes != max_hw_zeroes_bytes)
		return -EINVAL;

	lim->max_user_wzeroes_unmap_sectors = max_zeroes_bytes >> SECTOR_SHIFT;
	return 0;
}

static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}
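
/*
 * Here << 1 converts KiB back to 512-byte sectors. Only the user-requested
 * cap (max_user_sectors) is stored; the effective max_sectors is
 * revalidated against the hardware limits when the queue_limits update is
 * committed.
 */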

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)
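
/*
 * Any non-zero write sets the feature flag and zero clears it, e.g.
 * (device name illustrative):
 *
 *	echo 0 > /sys/block/sda/queue/rotational
 */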

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA)
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX)

static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));

	return sysfs_emit(page, "%u\n",
			!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static int queue_iostats_passthrough_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	if (ios)
		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
	return 0;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
		size_t count)
{
	unsigned long nm;
	unsigned int memflags;
	struct request_queue *q = disk->queue;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
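
/*
 * nomerges thus encodes two flags in one value: 0 enables all merging,
 * 1 disables only the more costly extended (cross-request) merge attempts,
 * and 2 disables merging entirely.
 */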

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;
	unsigned int memflags;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Two queue flags are updated here, each with an atomic bitop. The
	 * pair is not updated atomically, but that is harmless: readers test
	 * each flag individually with atomic test_bit(), so no lock is taken
	 * around the update.
	 */
	memflags = blk_mq_freeze_queue(q);
	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	blk_mq_unfreeze_queue(q, memflags);
#endif
	return ret;
}
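
/*
 * rq_affinity values mirror the flags above: 0 places no restriction on
 * the completion CPU, 1 completes on a CPU sharing the submitter's cache
 * domain (QUEUE_FLAG_SAME_COMP), and 2 forces completion on the exact
 * submitting CPU (QUEUE_FLAG_SAME_FORCE).
 */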

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
		size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
		size_t count)
{
	unsigned int memflags;
	ssize_t ret = count;
	struct request_queue *q = disk->queue;

	memflags = blk_mq_freeze_queue(q);
	if (!(q->limits.features & BLK_FEAT_POLL)) {
		ret = -EINVAL;
		goto out;
	}

	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
out:
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
		size_t count)
{
	unsigned int val, memflags;
	int err;
	struct request_queue *q = disk->queue;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
	blk_mq_unfreeze_queue(q, memflags);

	return count;
}
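
/*
 * io_timeout is exchanged with user space in milliseconds and stored
 * internally in jiffies; zero is rejected above because a request must
 * always have a non-zero expiry. Illustrative usage:
 *
 *	echo 60000 > /sys/block/sda/queue/io_timeout
 */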

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}
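
/*
 * Accepted write_cache values are "write back", "write through" and the
 * legacy alias "none"; anything else fails with -EINVAL:
 *
 *	echo "write through" > /sys/block/sda/queue/write_cache
 */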

#define QUEUE_RO_ENTRY(_prefix, _name)				\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr = { .name = _name, .mode = 0444 },		\
	.show = _prefix##_show,					\
};

#define QUEUE_RW_ENTRY(_prefix, _name)				\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr = { .name = _name, .mode = 0644 },		\
	.show = _prefix##_show,					\
	.store = _prefix##_store,				\
};

#define QUEUE_LIM_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr = { .name = _name, .mode = 0444 },		\
	.show_limit = _prefix##_show,				\
}

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr = { .name = _name, .mode = 0644 },		\
	.show_limit = _prefix##_show,				\
	.store_limit = _prefix##_store,				\
}

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_hw_wzeroes_unmap_sectors,
		"write_zeroes_unmap_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_wzeroes_unmap_sectors,
		"write_zeroes_unmap_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = { .name = "hw_sector_size", .mode = 0444 },
	.show_limit = queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	ssize_t ret;
	struct request_queue *q = disk->queue;

	mutex_lock(&disk->rqos_state_mutex);
	if (!wbt_rq_qos(q)) {
		ret = -EINVAL;
		goto out;
	}

	if (wbt_disabled(q)) {
		ret = sysfs_emit(page, "0\n");
		goto out;
	}

	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
out:
	mutex_unlock(&disk->rqos_state_mutex);
	return ret;
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
		size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;
	unsigned int memflags;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			goto out;
	}

	ret = count;
	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		goto out;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	mutex_lock(&disk->rqos_state_mutex);
	wbt_set_min_lat(q, val);
	mutex_unlock(&disk->rqos_state_mutex);

	blk_mq_unquiesce_queue(q);
out:
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
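
/*
 * wbt_lat_usec is exchanged in microseconds, hence the * 1000 and
 * div_u64(..., 1000) conversions to and from nanoseconds above. Writing -1
 * restores the default latency target and 0 disables writeback throttling,
 * e.g.:
 *
 *	echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */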

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	/*
	 * Attributes which are protected with q->limits_lock.
	 */
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_max_write_streams_entry.attr,
	&queue_write_stream_granularity_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_hw_wzeroes_unmap_sectors_entry.attr,
	&queue_max_wzeroes_unmap_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	&queue_ra_entry.attr,

	/*
	 * Attributes which don't require locking.
	 */
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_poll_entry.attr,
	&queue_poll_delay_entry.attr,

	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	/*
	 * Attributes which require some form of locking other than
	 * q->sysfs_lock.
	 */
	&elv_iosched_entry.attr,
	&queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	/*
	 * Attributes which don't require locking.
	 */
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,

	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
		int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);

	if (!entry->show && !entry->show_limit)
		return -EIO;

	if (entry->show_limit) {
		ssize_t res;

		mutex_lock(&disk->queue->limits_lock);
		res = entry->show_limit(disk, page);
		mutex_unlock(&disk->queue->limits_lock);
		return res;
	}

	return entry->show(disk, page);
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	if (entry->store_limit) {
		ssize_t res;

		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	return entry->store(disk, page, length);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show = queue_attr_show,
	.store = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops = &queue_sysfs_ops,
	.release = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		return ret;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_del_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_unregister_ia_ranges;

	if (queue_is_mq(q))
		elevator_set_default(q);

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Everything is set up; announce the queue with a KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here, as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
out_del_queue_kobj:
	kobject_del(&disk->queue_kobj);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);

	if (queue_is_mq(q))
		elevator_set_none(q);

	blk_debugfs_remove(disk);
}