// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_hw_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                struct queue_limits *lim)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         *
         * There is no hardware limitation for the read-ahead size and the user
         * might have increased the read-ahead size through sysfs, so don't ever
         * decrease it.
         */
        bdi->ra_pages = max3(bdi->ra_pages,
                        lim->io_opt * 2 / PAGE_SIZE,
                        VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
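
/*
 * As an illustration of the calculation above (made-up values, not from any
 * particular device): with lim->io_opt = 1 MiB and 4K pages,
 * lim->io_opt * 2 / PAGE_SIZE = 512 pages, which wins over both a default
 * bdi->ra_pages and VM_READAHEAD_PAGES unless the user already configured a
 * larger read-ahead through sysfs.
 */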

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!(lim->features & BLK_FEAT_ZONED)) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        /*
         * Given that active zones include open zones, the maximum number of
         * open zones cannot be larger than the maximum number of active zones.
         */
        if (lim->max_active_zones &&
            lim->max_open_zones > lim->max_active_zones)
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        /*
         * The Zone Append size is limited by the maximum I/O size and the zone
         * size given that it can't span zones.
         *
         * If no max_hw_zone_append_sectors limit is provided, the block layer
         * will emulate it, else we're also bound by the hardware limit.
         */
        lim->max_zone_append_sectors =
                min_not_zero(lim->max_hw_zone_append_sectors,
                        min(lim->chunk_sectors, lim->max_hw_sectors));
        return 0;
}
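
/*
 * For instance (illustrative numbers only, not from a specific device): a
 * zoned device with chunk_sectors = 524288 (256 MiB zones), max_hw_sectors =
 * 2048 and no hardware zone append limit ends up with an emulated
 * max_zone_append_sectors of min(524288, 2048) = 2048 sectors.
 */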

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
        struct blk_integrity *bi = &lim->integrity;

        if (!bi->tuple_size) {
                if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
                    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
                        pr_warn("invalid PI settings.\n");
                        return -EINVAL;
                }
                bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
                return 0;
        }

        if (lim->features & BLK_FEAT_BOUNCE_HIGH) {
                pr_warn("no bounce buffer support for integrity metadata\n");
                return -EINVAL;
        }

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
                pr_warn("integrity support disabled.\n");
                return -EINVAL;
        }

        if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
            (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                pr_warn("ref tag not supported without checksum.\n");
                return -EINVAL;
        }

        if (!bi->interval_exp)
                bi->interval_exp = ilog2(lim->logical_block_size);

        return 0;
}

/*
 * Returns max guaranteed bytes which we can fit in a bio.
 *
 * We require that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
        unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
        unsigned int length;

        length = min(max_segments, 2) * lim->logical_block_size;
        if (max_segments > 2)
                length += (max_segments - 2) * PAGE_SIZE;

        return length;
}
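
/*
 * A quick worked example of the guarantee above (assuming 4K pages and
 * made-up limits): with max_segments = 128 and a 512 byte logical block
 * size, the guaranteed length is 2 * 512 + 126 * 4096 = 517120 bytes.
 */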

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
        unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
                                      blk_queue_max_guaranteed_bio(lim));

        unit_limit = rounddown_pow_of_two(unit_limit);

        lim->atomic_write_max_sectors =
                min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
                    lim->max_hw_sectors);
        lim->atomic_write_unit_min =
                min(lim->atomic_write_hw_unit_min, unit_limit);
        lim->atomic_write_unit_max =
                min(lim->atomic_write_hw_unit_max, unit_limit);
        lim->atomic_write_boundary_sectors =
                lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}
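
/*
 * Continuing the illustrative numbers from the previous example: with
 * max_hw_sectors = 2048 (1 MiB), unit_limit starts as
 * min(1048576, 517120) = 517120 bytes, which rounddown_pow_of_two() reduces
 * to 262144 bytes, so the reported atomic write unit would be capped at
 * 256 KiB in that configuration.
 */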

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
        unsigned int boundary_sectors;

        if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        if (!lim->atomic_write_hw_max)
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
                         lim->atomic_write_hw_unit_max))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
                         lim->atomic_write_hw_max))
                goto unsupported;

        boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

        if (boundary_sectors) {
                if (WARN_ON_ONCE(lim->atomic_write_hw_max >
                                 lim->atomic_write_hw_boundary))
                        goto unsupported;
                /*
                 * A feature of boundary support is that it disallows bios to
                 * be merged which would result in a merged request which
                 * crosses either a chunk sector or atomic write HW boundary,
                 * even though chunk sectors may be just set for performance.
                 * For simplicity, disallow atomic writes for a chunk sector
                 * which is non-zero and smaller than atomic write HW boundary.
                 * Furthermore, chunk sectors must be a multiple of atomic
                 * write HW boundary. Otherwise boundary support becomes
                 * complicated.
                 * Devices which do not conform to these rules can be dealt
                 * with if and when they show up.
                 */
                if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
                        goto unsupported;

                /*
                 * The boundary size just needs to be a multiple of unit_max
                 * (and not necessarily a power-of-2), so this following check
                 * could be relaxed in future.
                 * Furthermore, if needed, unit_max could even be reduced so
                 * that it is compliant with a !power-of-2 boundary.
                 */
                if (!is_power_of_2(boundary_sectors))
                        goto unsupported;
        }

        blk_atomic_writes_update_limits(lim);
        return;

unsupported:
        lim->atomic_write_max_sectors = 0;
        lim->atomic_write_boundary_sectors = 0;
        lim->atomic_write_unit_min = 0;
        lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;
        unsigned int logical_block_sectors;
        unsigned long seg_size;
        int err;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and a
         * physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        else if (blk_validate_block_size(lim->logical_block_size)) {
                pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
                return -EINVAL;
        }
        if (lim->physical_block_size < lim->logical_block_size)
                lim->physical_block_size = lim->logical_block_size;

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * The optimal I/O size may not be aligned to physical block size
         * (because it may be limited by dma engines which have no clue about
         * block size of the disks attached to them), so we round it down here.
         */
        lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
        if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                        logical_block_sectors);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account. The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                        lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
        } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                        logical_block_sectors);

        /*
         * Arbitrary default for the maximum number of segments. Drivers
         * should not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        if (lim->discard_granularity < lim->physical_block_size)
                lim->discard_granularity = lim->physical_block_size;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
                return -EINVAL;

        /*
         * A stacking device may have both a virtual boundary and a max
         * segment size limit, so allow this setting for now. Long-term the
         * two might need to move out of the stacking limits, since we have
         * immutable bvecs and lower layer bio splitting is supposed to
         * handle the two correctly.
         */
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default that
                 * drivers probably should override. Just like the I/O size we
                 * require drivers to at least handle a full page per segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
                        return -EINVAL;
        }

        /* setup min segment size for building new segment in fast path */
        if (lim->seg_boundary_mask > lim->max_segment_size - 1)
                seg_size = lim->max_segment_size;
        else
                seg_size = lim->seg_boundary_mask + 1;
        lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits. Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->flags &= ~BLK_FLAG_MISALIGNED;
        }

        if (!(lim->features & BLK_FEAT_WRITE_CACHE))
                lim->features &= ~BLK_FEAT_FUA;

        blk_validate_atomic_write_limits(lim);

        err = blk_validate_integrity_limits(lim);
        if (err)
                return err;
        return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but max_user_discard_sectors is special and needs an explicit
         * initialization to the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim)
{
        int error;

        error = blk_validate_limits(lim);
        if (error)
                goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (q->crypto_profile && lim->integrity.tag_size) {
                pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
                error = -EINVAL;
                goto out_unlock;
        }
#endif

        q->limits = *lim;
        if (q->disk)
                blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
                struct queue_limits *lim)
{
        unsigned int memflags;
        int ret;

        memflags = blk_mq_freeze_queue(q);
        ret = queue_limits_commit_update(q, lim);
        blk_mq_unfreeze_queue(q, memflags);

        return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
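
/*
 * A minimal usage sketch of the update API above (illustrative only, not
 * lifted from a real driver; "new_max_sectors" is a made-up stand-in for a
 * driver-specific value): the caller snapshots the limits with
 * queue_limits_start_update(), edits the fields it cares about and commits
 * the whole set atomically.
 *
 *      struct queue_limits lim = queue_limits_start_update(q);
 *      int error;
 *
 *      lim.max_hw_sectors = new_max_sectors;
 *      error = queue_limits_commit_update_frozen(q, &lim);
 *      if (error)
 *              return error;
 */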

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
                sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
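
/*
 * Worked example for the helper above (made-up numbers): a device with 4096
 * byte physical blocks (a granularity of 8 sectors), alignment_offset 0 and a
 * partition starting at sector 63. sector_div() leaves 63 % 8 = 7 sectors =
 * 3584 bytes, so the reported alignment offset is
 * (4096 + 0 - 3584) % 4096 = 512 bytes.
 */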

static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
                struct queue_limits *b)
{
        /* We're not going to support different boundary sizes.. yet */
        if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
                return false;

        /* Can't support this */
        if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
                return false;

        /* Or this */
        if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
                return false;

        t->atomic_write_hw_max = min(t->atomic_write_hw_max,
                                     b->atomic_write_hw_max);
        t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
                                          b->atomic_write_hw_unit_min);
        t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
                                          b->atomic_write_hw_unit_max);
        return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
                struct queue_limits *b)
{
        /*
         * Ensure atomic write boundary is aligned with chunk sectors. Stacked
         * devices store chunk sectors in t->io_min.
         */
        if (b->atomic_write_hw_boundary > t->io_min &&
            b->atomic_write_hw_boundary % t->io_min)
                return false;
        if (t->io_min > b->atomic_write_hw_boundary &&
            t->io_min % b->atomic_write_hw_boundary)
                return false;

        t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
        return true;
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
                struct queue_limits *b)
{
        if (b->atomic_write_hw_boundary &&
            !blk_stack_atomic_writes_boundary_head(t, b))
                return false;

        if (t->io_min <= SECTOR_SIZE) {
                /* No chunk sectors, so use bottom device values directly */
                t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
                t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
                t->atomic_write_hw_max = b->atomic_write_hw_max;
                return true;
        }

        /*
         * Find values for limits which work for chunk size.
         * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
         * size (t->io_min), as chunk size is not restricted to a power-of-2.
         * So we need to find highest power-of-2 which works for the chunk
         * size.
         * As an example scenario, we could have b->unit_max = 16K and
         * t->io_min = 24K. For this case, reduce t->unit_max to a value
         * aligned with both limits, i.e. 8K in this example.
         */
        t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
        while (t->io_min % t->atomic_write_hw_unit_max)
                t->atomic_write_hw_unit_max /= 2;

        t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
                                          t->atomic_write_hw_unit_max);
        t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);

        return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
                struct queue_limits *b, sector_t start)
{
        if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        if (!b->atomic_write_hw_unit_min)
                goto unsupported;

        if (!blk_atomic_write_start_sect_aligned(start, b))
                goto unsupported;

        /*
         * If atomic_write_hw_max is set, we have already stacked 1x bottom
         * device, so check for compliance.
         */
        if (t->atomic_write_hw_max) {
                if (!blk_stack_atomic_writes_tail(t, b))
                        goto unsupported;
                return;
        }

        if (!blk_stack_atomic_writes_head(t, b))
                goto unsupported;
        return;

unsupported:
        t->atomic_write_hw_max = 0;
        t->atomic_write_hw_unit_max = 0;
        t->atomic_write_hw_unit_min = 0;
        t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

        /*
         * Some features need to be supported by both the stacking driver and
         * all underlying devices. The stacking driver sets these flags before
         * stacking the limits, and this will clear the flags if any of the
         * underlying devices does not support it.
         */
        if (!(b->features & BLK_FEAT_NOWAIT))
                t->features &= ~BLK_FEAT_NOWAIT;
        if (!(b->features & BLK_FEAT_POLL))
                t->features &= ~BLK_FEAT_POLL;

        t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_user_sectors = min_not_zero(t->max_user_sectors,
                        b->max_user_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                        b->max_write_zeroes_sectors);
        t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
                        b->max_hw_zone_append_sectors);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                        b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                        b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                        b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                        b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                        b->max_segment_size);

        alignment = queue_limit_alignment_offset(b, start);

        /*
         * The bottom device has a different alignment. Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->flags |= BLK_FLAG_MISALIGNED;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                        b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                        b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                        b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                        b->zone_write_granularity);
        if (!(t->features & BLK_FEAT_ZONED)) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        blk_stack_atomic_writes_limits(t, b, start);

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, bdev_limits(bdev),
                        get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
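
/*
 * A rough sketch of the stacking pattern described above (illustrative only;
 * the "conf" structure, its "disks" list and the "example_member" type are
 * made up for the example): the stacking driver resets its limits and then
 * folds in every component device before applying the result to its queue.
 *
 *      struct queue_limits lim;
 *      struct example_member *dev;
 *
 *      blk_set_stacking_limits(&lim);
 *      list_for_each_entry(dev, &conf->disks, list)
 *              queue_limits_stack_bdev(&lim, dev->bdev, dev->data_offset,
 *                                      "example");
 *      return queue_limits_set(q, &lim);
 */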

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet
 * b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
                struct queue_limits *b)
{
        struct blk_integrity *ti = &t->integrity;
        struct blk_integrity *bi = &b->integrity;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                return true;

        if (ti->flags & BLK_INTEGRITY_STACKED) {
                if (ti->tuple_size != bi->tuple_size)
                        goto incompatible;
                if (ti->interval_exp != bi->interval_exp)
                        goto incompatible;
                if (ti->tag_size != bi->tag_size)
                        goto incompatible;
                if (ti->csum_type != bi->csum_type)
                        goto incompatible;
                if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
                    (bi->flags & BLK_INTEGRITY_REF_TAG))
                        goto incompatible;
        } else {
                ti->flags = BLK_INTEGRITY_STACKED;
                ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
                             (bi->flags & BLK_INTEGRITY_REF_TAG);
                ti->csum_type = bi->csum_type;
                ti->tuple_size = bi->tuple_size;
                ti->pi_offset = bi->pi_offset;
                ti->interval_exp = bi->interval_exp;
                ti->tag_size = bi->tag_size;
        }
        return true;

incompatible:
        memset(ti, 0, sizeof(*ti));
        return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.flags & BLK_FLAG_MISALIGNED)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);