xref: /linux/block/blk-settings.c (revision fad6551fcf537375702b9af012508156a16a1ff7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/pagemap.h>
11 #include <linux/backing-dev-defs.h>
12 #include <linux/gcd.h>
13 #include <linux/lcm.h>
14 #include <linux/jiffies.h>
15 #include <linux/gfp.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/t10-pi.h>
18 #include <linux/crc64.h>
19 
20 #include "blk.h"
21 #include "blk-rq-qos.h"
22 #include "blk-wbt.h"
23 
24 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
25 {
26 	WRITE_ONCE(q->rq_timeout, timeout);
27 }
28 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
29 
30 /**
31  * blk_set_stacking_limits - set default limits for stacking devices
32  * @lim:  the queue_limits structure to reset
33  *
34  * Prepare queue limits for applying limits from underlying devices using
35  * blk_stack_limits().
36  */
37 void blk_set_stacking_limits(struct queue_limits *lim)
38 {
39 	memset(lim, 0, sizeof(*lim));
40 	lim->logical_block_size = SECTOR_SIZE;
41 	lim->physical_block_size = SECTOR_SIZE;
42 	lim->io_min = SECTOR_SIZE;
43 	lim->discard_granularity = SECTOR_SIZE;
44 	lim->dma_alignment = SECTOR_SIZE - 1;
45 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
46 
47 	/* Inherit limits from component devices */
48 	lim->max_segments = USHRT_MAX;
49 	lim->max_discard_segments = USHRT_MAX;
50 	lim->max_hw_sectors = UINT_MAX;
51 	lim->max_segment_size = UINT_MAX;
52 	lim->max_sectors = UINT_MAX;
53 	lim->max_dev_sectors = UINT_MAX;
54 	lim->max_write_zeroes_sectors = UINT_MAX;
55 	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
56 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
57 	lim->max_hw_zone_append_sectors = UINT_MAX;
58 	lim->max_user_discard_sectors = UINT_MAX;
59 }
60 EXPORT_SYMBOL(blk_set_stacking_limits);
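/*
 * Editorial usage sketch (not part of the kernel source): a stacking driver
 * such as DM or MD would typically initialize its limits with
 * blk_set_stacking_limits(), fold in each component device, and then apply
 * the result; the device names and error handling below are illustrative only.
 *
 *	struct queue_limits lim;
 *	int error;
 *
 *	blk_set_stacking_limits(&lim);
 *	// for each underlying block device 'bdev' holding data at 'offset':
 *	queue_limits_stack_bdev(&lim, bdev, offset, disk->disk_name);
 *	// apply the freshly built limits to the stacked queue:
 *	error = queue_limits_set(disk->queue, &lim);
 */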
61 
62 void blk_apply_bdi_limits(struct backing_dev_info *bdi,
63 		struct queue_limits *lim)
64 {
65 	u64 io_opt = lim->io_opt;
66 
67 	/*
68 	 * For read-ahead of large files to be effective, we need to read ahead
69 	 * at least twice the optimal I/O size. For rotational devices that do
70 	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
71 	 * size to avoid falling back to the (rather inefficient) small default
72 	 * read-ahead size.
73 	 *
74 	 * There is no hardware limitation for the read-ahead size and the user
75 	 * might have increased the read-ahead size through sysfs, so don't ever
76 	 * decrease it.
77 	 */
78 	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
79 		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;
80 
81 	bdi->ra_pages = max3(bdi->ra_pages,
82 				io_opt * 2 >> PAGE_SHIFT,
83 				VM_READAHEAD_PAGES);
84 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
85 }
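/*
 * Worked example (editorial, assuming 4 KiB pages): a rotational disk that
 * reports no io_opt and has max_sectors = 2560 (1280 KiB) yields
 * io_opt = 1310720 bytes above, so ra_pages becomes at least
 * 2 * 1310720 >> 12 = 640 pages, well above VM_READAHEAD_PAGES.
 */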
86 
87 static int blk_validate_zoned_limits(struct queue_limits *lim)
88 {
89 	if (!(lim->features & BLK_FEAT_ZONED)) {
90 		if (WARN_ON_ONCE(lim->max_open_zones) ||
91 		    WARN_ON_ONCE(lim->max_active_zones) ||
92 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
93 		    WARN_ON_ONCE(lim->max_zone_append_sectors))
94 			return -EINVAL;
95 		return 0;
96 	}
97 
98 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
99 		return -EINVAL;
100 
101 	/*
102 	 * Given that active zones include open zones, the maximum number of
103 	 * open zones cannot be larger than the maximum number of active zones.
104 	 */
105 	if (lim->max_active_zones &&
106 	    lim->max_open_zones > lim->max_active_zones)
107 		return -EINVAL;
108 
109 	if (lim->zone_write_granularity < lim->logical_block_size)
110 		lim->zone_write_granularity = lim->logical_block_size;
111 
112 	/*
113 	 * The Zone Append size is limited by the maximum I/O size and the zone
114 	 * size given that it can't span zones.
115 	 *
116 	 * If no max_hw_zone_append_sectors limit is provided, the block layer
117 	 * will emulate it; otherwise we are also bound by the hardware limit.
118 	 */
119 	lim->max_zone_append_sectors =
120 		min_not_zero(lim->max_hw_zone_append_sectors,
121 			min(lim->chunk_sectors, lim->max_hw_sectors));
122 	return 0;
123 }
124 
125 static int blk_validate_integrity_limits(struct queue_limits *lim)
126 {
127 	struct blk_integrity *bi = &lim->integrity;
128 
129 	if (!bi->metadata_size) {
130 		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
131 		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
132 			pr_warn("invalid PI settings.\n");
133 			return -EINVAL;
134 		}
135 		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
136 		return 0;
137 	}
138 
139 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
140 		pr_warn("integrity support disabled.\n");
141 		return -EINVAL;
142 	}
143 
144 	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
145 	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
146 		pr_warn("ref tag not supported without checksum.\n");
147 		return -EINVAL;
148 	}
149 
150 	if (bi->pi_tuple_size > bi->metadata_size) {
151 		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
152 			 bi->pi_tuple_size,
153 			 bi->metadata_size);
154 		return -EINVAL;
155 	}
156 
157 	switch (bi->csum_type) {
158 	case BLK_INTEGRITY_CSUM_NONE:
159 		if (bi->pi_tuple_size) {
160 			pr_warn("pi_tuple_size must be 0 when checksum type \
161 			pr_warn("pi_tuple_size must be 0 when checksum type "
162 				"is none\n");
163 		}
164 		break;
165 	case BLK_INTEGRITY_CSUM_CRC:
166 	case BLK_INTEGRITY_CSUM_IP:
167 		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
168 			pr_warn("pi_tuple_size mismatch for T10 PI: expected "
169 				"%zu, got %u\n",
170 				 sizeof(struct t10_pi_tuple),
171 				 bi->pi_tuple_size);
172 			return -EINVAL;
173 		}
174 		break;
175 	case BLK_INTEGRITY_CSUM_CRC64:
176 		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
177 			pr_warn("pi_tuple_size mismatch for CRC64 PI: "
178 				"expected %zu, got %u\n",
179 				 sizeof(struct crc64_pi_tuple),
180 				 bi->pi_tuple_size);
181 			return -EINVAL;
182 		}
183 		break;
184 	}
185 
186 	if (!bi->interval_exp)
187 		bi->interval_exp = ilog2(lim->logical_block_size);
188 
189 	return 0;
190 }
191 
192 /*
193  * Returns max guaranteed bytes which we can fit in a bio.
194  *
195  * We require that an atomic write is submitted as an ITER_UBUF iov_iter (so
196  * a single vector), which lets us assume that every segment apart from the
197  * first and last can hold at least PAGE_SIZE bytes.
198  */
199 static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
200 {
201 	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
202 	unsigned int length;
203 
204 	length = min(max_segments, 2) * lim->logical_block_size;
205 	if (max_segments > 2)
206 		length += (max_segments - 2) * PAGE_SIZE;
207 
208 	return length;
209 }
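/*
 * Worked example (editorial, assuming 4 KiB pages and 512-byte logical
 * blocks): with max_segments >= BIO_MAX_VECS (256) the guarantee is
 * 2 * 512 + 254 * 4096 = 1041408 bytes; blk_atomic_writes_update_limits()
 * below then rounds this down to a power of two (512 KiB) when deriving the
 * atomic write unit limit.
 */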
210 
211 static void blk_atomic_writes_update_limits(struct queue_limits *lim)
212 {
213 	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
214 					blk_queue_max_guaranteed_bio(lim));
215 
216 	unit_limit = rounddown_pow_of_two(unit_limit);
217 
218 	lim->atomic_write_max_sectors =
219 		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
220 			lim->max_hw_sectors);
221 	lim->atomic_write_unit_min =
222 		min(lim->atomic_write_hw_unit_min, unit_limit);
223 	lim->atomic_write_unit_max =
224 		min(lim->atomic_write_hw_unit_max, unit_limit);
225 	lim->atomic_write_boundary_sectors =
226 		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
227 }
228 
229 static void blk_validate_atomic_write_limits(struct queue_limits *lim)
230 {
231 	unsigned int boundary_sectors;
232 	unsigned int atomic_write_hw_max_sectors =
233 			lim->atomic_write_hw_max >> SECTOR_SHIFT;
234 
235 	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
236 		goto unsupported;
237 
238 	if (!lim->atomic_write_hw_max)
239 		goto unsupported;
240 
241 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
242 		goto unsupported;
243 
244 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
245 		goto unsupported;
246 
247 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
248 			 lim->atomic_write_hw_unit_max))
249 		goto unsupported;
250 
251 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
252 			 lim->atomic_write_hw_max))
253 		goto unsupported;
254 
255 	if (WARN_ON_ONCE(lim->chunk_sectors &&
256 			atomic_write_hw_max_sectors > lim->chunk_sectors))
257 		goto unsupported;
258 
259 	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
260 
261 	if (boundary_sectors) {
262 		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
263 				 lim->atomic_write_hw_boundary))
264 			goto unsupported;
265 		/*
266 		 * Boundary support disallows merging bios when the merged
267 		 * request would cross either a chunk sector or atomic write
268 		 * HW boundary, even though chunk sectors may be set purely
269 		 * for performance.
270 		 * For simplicity, disallow atomic writes when the chunk
271 		 * sector count is non-zero and smaller than the atomic write
272 		 * HW boundary. Furthermore, chunk sectors must be a multiple
273 		 * of the atomic write HW boundary; otherwise boundary support
274 		 * becomes complicated.
275 		 * Devices which do not conform to these rules can be dealt
276 		 * with if and when they show up.
277 		 */
278 		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
279 			goto unsupported;
280 
281 		/*
282 		 * The boundary size just needs to be a multiple of unit_max
283 		 * (and not necessarily a power-of-2), so this following check
284 		 * could be relaxed in future.
285 		 * Furthermore, if needed, unit_max could even be reduced so
286 		 * that it is compliant with a !power-of-2 boundary.
287 		 */
288 		if (!is_power_of_2(boundary_sectors))
289 			goto unsupported;
290 	}
291 
292 	blk_atomic_writes_update_limits(lim);
293 	return;
294 
295 unsupported:
296 	lim->atomic_write_max_sectors = 0;
297 	lim->atomic_write_boundary_sectors = 0;
298 	lim->atomic_write_unit_min = 0;
299 	lim->atomic_write_unit_max = 0;
300 }
301 
302 /*
303  * Check that the limits in lim are valid, initialize defaults for unset
304  * values, and cap values based on others where needed.
305  */
306 int blk_validate_limits(struct queue_limits *lim)
307 {
308 	unsigned int max_hw_sectors;
309 	unsigned int logical_block_sectors;
310 	unsigned long seg_size;
311 	int err;
312 
313 	/*
314 	 * Unless otherwise specified, default to 512 byte logical blocks and a
315 	 * physical block size equal to the logical block size.
316 	 */
317 	if (!lim->logical_block_size)
318 		lim->logical_block_size = SECTOR_SIZE;
319 	else if (blk_validate_block_size(lim->logical_block_size)) {
320 		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
321 		return -EINVAL;
322 	}
323 	if (lim->physical_block_size < lim->logical_block_size) {
324 		lim->physical_block_size = lim->logical_block_size;
325 	} else if (!is_power_of_2(lim->physical_block_size)) {
326 		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
327 		return -EINVAL;
328 	}
329 
330 	/*
331 	 * The minimum I/O size defaults to the physical block size unless
332 	 * explicitly overridden.
333 	 */
334 	if (lim->io_min < lim->physical_block_size)
335 		lim->io_min = lim->physical_block_size;
336 
337 	/*
338 	 * The optimal I/O size may not be aligned to physical block size
339 	 * (because it may be limited by dma engines which have no clue about
340 	 * block size of the disks attached to them), so we round it down here.
341 	 */
342 	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
343 
344 	/*
345 	 * max_hw_sectors has a somewhat weird default for historical reasons,
346 	 * but drivers really should set their own instead of relying on this
347 	 * value.
348 	 *
349 	 * The block layer relies on the fact that every driver can
350 	 * handle at least a page worth of data per I/O, and needs the value
351 	 * aligned to the logical block size.
352 	 */
353 	if (!lim->max_hw_sectors)
354 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
355 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
356 		return -EINVAL;
357 	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
358 	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
359 		return -EINVAL;
360 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
361 			logical_block_sectors);
362 
363 	/*
364 	 * The actual max_sectors value is a complex beast and also takes the
365 	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
366 	 * value into account.  The ->max_sectors value is always calculated
367 	 * from these, so directly setting it won't have any effect.
368 	 */
369 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
370 				lim->max_dev_sectors);
371 	if (lim->max_user_sectors) {
372 		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
373 			return -EINVAL;
374 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
375 	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
376 		lim->max_sectors =
377 			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
378 	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
379 		lim->max_sectors =
380 			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
381 	} else {
382 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
383 	}
384 	lim->max_sectors = round_down(lim->max_sectors,
385 			logical_block_sectors);
386 
387 	/*
388 	 * Arbitrary default for the maximum number of segments.  Drivers should
389 	 * not rely on this and should set their own.
390 	 */
391 	if (!lim->max_segments)
392 		lim->max_segments = BLK_MAX_SEGMENTS;
393 
394 	if (lim->max_hw_wzeroes_unmap_sectors &&
395 	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
396 		return -EINVAL;
397 	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
398 			lim->max_user_wzeroes_unmap_sectors);
399 
400 	lim->max_discard_sectors =
401 		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
402 
403 	/*
404 	 * When discard is not supported, discard_granularity should be reported
405 	 * as 0 to userspace.
406 	 */
407 	if (lim->max_discard_sectors)
408 		lim->discard_granularity =
409 			max(lim->discard_granularity, lim->physical_block_size);
410 	else
411 		lim->discard_granularity = 0;
412 
413 	if (!lim->max_discard_segments)
414 		lim->max_discard_segments = 1;
415 
416 	/*
417 	 * By default there is no limit on the segment boundary alignment,
418 	 * but if there is one it can't be smaller than the page size as
419 	 * that would break all the normal I/O patterns.
420 	 */
421 	if (!lim->seg_boundary_mask)
422 		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
423 	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
424 		return -EINVAL;
425 
426 	/*
427 	 * A stacking device may have both a virtual boundary and a max segment
428 	 * size limit, so allow this setting for now.  Long-term the two might
429 	 * need to move out of the stacking limits, since we have immutable
430 	 * bvecs and lower layer bio splitting is supposed to handle the two
431 	 * correctly.
432 	 */
433 	if (lim->virt_boundary_mask) {
434 		if (!lim->max_segment_size)
435 			lim->max_segment_size = UINT_MAX;
436 	} else {
437 		/*
438 		 * The maximum segment size has an odd historic 64k default that
439 		 * drivers probably should override.  Just like the I/O size we
440 		 * require drivers to at least handle a full page per segment.
441 		 */
442 		if (!lim->max_segment_size)
443 			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
444 		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
445 			return -EINVAL;
446 	}
447 
448 	/* setup min segment size for building new segment in fast path */
449 	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
450 		seg_size = lim->max_segment_size;
451 	else
452 		seg_size = lim->seg_boundary_mask + 1;
453 	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
454 
455 	/*
456 	 * We require drivers to at least do logical block aligned I/O, but
457 	 * historically could not check for that due to the separate calls
458 	 * to set the limits.  Once the transition is finished the check
459 	 * below should be narrowed down to check the logical block size.
460 	 */
461 	if (!lim->dma_alignment)
462 		lim->dma_alignment = SECTOR_SIZE - 1;
463 	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
464 		return -EINVAL;
465 
466 	if (lim->alignment_offset) {
467 		lim->alignment_offset &= (lim->physical_block_size - 1);
468 		lim->flags &= ~BLK_FLAG_MISALIGNED;
469 	}
470 
471 	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
472 		lim->features &= ~BLK_FEAT_FUA;
473 
474 	blk_validate_atomic_write_limits(lim);
475 
476 	err = blk_validate_integrity_limits(lim);
477 	if (err)
478 		return err;
479 	return blk_validate_zoned_limits(lim);
480 }
481 EXPORT_SYMBOL_GPL(blk_validate_limits);
482 
483 /*
484  * Set the default limits for a newly allocated queue.  @lim contains the
485  * initial limits set by the driver; if the driver did not set any limits,
486  * all fields are cleared to zero.
487  */
488 int blk_set_default_limits(struct queue_limits *lim)
489 {
490 	/*
491 	 * Most defaults are set by capping the bounds in blk_validate_limits,
492 	 * but these limits are special and need an explicit initialization to
493 	 * the max value here.
494 	 */
495 	lim->max_user_discard_sectors = UINT_MAX;
496 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
497 	return blk_validate_limits(lim);
498 }
499 
500 /**
501  * queue_limits_commit_update - commit an atomic update of queue limits
502  * @q:		queue to update
503  * @lim:	limits to apply
504  *
505  * Apply the limits in @lim that were obtained from queue_limits_start_update()
506  * and updated by the caller to @q.  The caller must have frozen the queue or
507  * ensure that there are no outstanding I/Os by other means.
508  *
509  * Returns 0 if successful, else a negative error code.
510  */
511 int queue_limits_commit_update(struct request_queue *q,
512 		struct queue_limits *lim)
513 {
514 	int error;
515 
516 	error = blk_validate_limits(lim);
517 	if (error)
518 		goto out_unlock;
519 
520 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
521 	if (q->crypto_profile && lim->integrity.tag_size) {
522 		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
523 		error = -EINVAL;
524 		goto out_unlock;
525 	}
526 #endif
527 
528 	q->limits = *lim;
529 	if (q->disk)
530 		blk_apply_bdi_limits(q->disk->bdi, lim);
531 out_unlock:
532 	mutex_unlock(&q->limits_lock);
533 	return error;
534 }
535 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
536 
537 /**
538  * queue_limits_commit_update_frozen - commit an atomic update of queue limits
539  * @q:		queue to update
540  * @lim:	limits to apply
541  *
542  * Apply the limits in @lim that were obtained from queue_limits_start_update()
543  * and updated with the new values by the caller to @q.  Freezes the queue
544  * before the update and unfreezes it after.
545  *
546  * Returns 0 if successful, else a negative error code.
547  */
548 int queue_limits_commit_update_frozen(struct request_queue *q,
549 		struct queue_limits *lim)
550 {
551 	unsigned int memflags;
552 	int ret;
553 
554 	memflags = blk_mq_freeze_queue(q);
555 	ret = queue_limits_commit_update(q, lim);
556 	blk_mq_unfreeze_queue(q, memflags);
557 
558 	return ret;
559 }
560 EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
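/*
 * Editorial usage sketch (not part of the kernel source): the intended
 * update pattern for a driver changing limits at runtime looks roughly like
 * the following; the field being changed is illustrative only.
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = new_max;	// driver-specific change
 *	error = queue_limits_commit_update_frozen(q, &lim);
 *
 * queue_limits_start_update() acquires q->limits_lock, which the commit
 * helpers release again on both success and failure.
 */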
561 
562 /**
563  * queue_limits_set - apply queue limits to queue
564  * @q:		queue to update
565  * @lim:	limits to apply
566  *
567  * Apply the limits in @lim that were freshly initialized to @q.
568  * To update existing limits use queue_limits_start_update() and
569  * queue_limits_commit_update() instead.
570  *
571  * Returns 0 if successful, else a negative error code.
572  */
573 int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
574 {
575 	mutex_lock(&q->limits_lock);
576 	return queue_limits_commit_update(q, lim);
577 }
578 EXPORT_SYMBOL_GPL(queue_limits_set);
579 
580 static int queue_limit_alignment_offset(const struct queue_limits *lim,
581 		sector_t sector)
582 {
583 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
584 	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
585 		<< SECTOR_SHIFT;
586 
587 	return (granularity + lim->alignment_offset - alignment) % granularity;
588 }
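/*
 * Worked example (editorial): for a disk with physical_block_size = io_min =
 * 4096 and alignment_offset = 0, a partition starting at sector 63 sits
 * 7 * 512 = 3584 bytes into a 4096-byte granule, so this returns
 * (4096 + 0 - 3584) % 4096 = 512, i.e. the partition start is 512 bytes
 * short of the next physical block boundary.
 */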
589 
590 static unsigned int queue_limit_discard_alignment(
591 		const struct queue_limits *lim, sector_t sector)
592 {
593 	unsigned int alignment, granularity, offset;
594 
595 	if (!lim->max_discard_sectors)
596 		return 0;
597 
598 	/* Why are these in bytes, not sectors? */
599 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
600 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
601 
602 	/* Offset of the partition start in 'granularity' sectors */
603 	offset = sector_div(sector, granularity);
604 
605 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
606 	offset = (granularity + alignment - offset) % granularity;
607 
608 	/* Turn it back into bytes, gaah */
609 	return offset << SECTOR_SHIFT;
610 }
611 
612 static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
613 {
614 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
615 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
616 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
617 	return sectors;
618 }
619 
620 /* Check if second and later bottom devices are compliant */
621 static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
622 				struct queue_limits *b)
623 {
624 	/* We're not going to support different boundary sizes.. yet */
625 	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
626 		return false;
627 
628 	/* Can't support this */
629 	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
630 		return false;
631 
632 	/* Or this */
633 	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
634 		return false;
635 
636 	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
637 				b->atomic_write_hw_max);
638 	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
639 				b->atomic_write_hw_unit_min);
640 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
641 				b->atomic_write_hw_unit_max);
642 	return true;
643 }
644 
645 /* Check for valid boundary of first bottom device */
646 static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
647 				struct queue_limits *b)
648 {
649 	/*
650 	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
651 	 * devices store chunk sectors in t->io_min.
652 	 */
653 	if (b->atomic_write_hw_boundary > t->io_min &&
654 	    b->atomic_write_hw_boundary % t->io_min)
655 		return false;
656 	if (t->io_min > b->atomic_write_hw_boundary &&
657 	    t->io_min % b->atomic_write_hw_boundary)
658 		return false;
659 
660 	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
661 	return true;
662 }
663 
664 static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
665 {
666 	unsigned int chunk_bytes;
667 
668 	if (!t->chunk_sectors)
669 		return;
670 
671 	/*
672 	 * If chunk sectors is so large that its value in bytes overflows
673 	 * UINT_MAX, then just shift it down so it definitely will fit.
674 	 * We don't support atomic writes of such a large size anyway.
675 	 */
676 	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
677 		chunk_bytes = t->chunk_sectors;
678 
679 	/*
680 	 * Find values for limits which work for chunk size.
681 	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
682 	 * size, as the chunk size is not restricted to a power-of-2.
683 	 * So we need to find highest power-of-2 which works for the chunk
684 	 * size.
685 	 * As an example scenario, we could have t->unit_max = 16K and a chunk
686 	 * size of 24K. For this case, reduce t->unit_max to a value aligned
687 	 * with both limits, i.e. 8K in this example.
688 	 */
689 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
690 					max_pow_of_two_factor(chunk_bytes));
691 
692 	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
693 					  t->atomic_write_hw_unit_max);
694 	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
695 }
696 
697 /* Check stacking of first bottom device */
698 static bool blk_stack_atomic_writes_head(struct queue_limits *t,
699 				struct queue_limits *b)
700 {
701 	if (b->atomic_write_hw_boundary &&
702 	    !blk_stack_atomic_writes_boundary_head(t, b))
703 		return false;
704 
705 	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
706 	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
707 	t->atomic_write_hw_max = b->atomic_write_hw_max;
708 	return true;
709 }
710 
711 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
712 				struct queue_limits *b, sector_t start)
713 {
714 	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
715 		goto unsupported;
716 
717 	if (!b->atomic_write_hw_unit_min)
718 		goto unsupported;
719 
720 	if (!blk_atomic_write_start_sect_aligned(start, b))
721 		goto unsupported;
722 
723 	/*
724 	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
725 	 * device, so check for compliance.
726 	 */
727 	if (t->atomic_write_hw_max) {
728 		if (!blk_stack_atomic_writes_tail(t, b))
729 			goto unsupported;
730 		return;
731 	}
732 
733 	if (!blk_stack_atomic_writes_head(t, b))
734 		goto unsupported;
735 	blk_stack_atomic_writes_chunk_sectors(t);
736 	return;
737 
738 unsupported:
739 	t->atomic_write_hw_max = 0;
740 	t->atomic_write_hw_unit_max = 0;
741 	t->atomic_write_hw_unit_min = 0;
742 	t->atomic_write_hw_boundary = 0;
743 }
744 
745 /**
746  * blk_stack_limits - adjust queue_limits for stacked devices
747  * @t:	the stacking driver limits (top device)
748  * @b:  the underlying queue limits (bottom, component device)
749  * @start:  first data sector within component device
750  *
751  * Description:
752  *    This function is used by stacking drivers like MD and DM to ensure
753  *    that all component devices have compatible block sizes and
754  *    alignments.  The stacking driver must provide a queue_limits
755  *    struct (top) and then iteratively call the stacking function for
756  *    all component (bottom) devices.  The stacking function will
757  *    attempt to combine the values and ensure proper alignment.
758  *
759  *    Returns 0 if the top and bottom queue_limits are compatible.  The
760  *    top device's block sizes and alignment offsets may be adjusted to
761  *    ensure alignment with the bottom device. If no compatible sizes
762  *    and alignments exist, -1 is returned and the resulting top
763  *    queue_limits will have the misaligned flag set to indicate that
764  *    the alignment_offset is undefined.
765  */
766 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
767 		     sector_t start)
768 {
769 	unsigned int top, bottom, alignment, ret = 0;
770 
771 	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
772 
773 	/*
774 	 * Some features need to be supported both by the stacking driver and all
775 	 * underlying devices.  The stacking driver sets these flags before
776 	 * stacking the limits, and this will clear the flags if any of the
777 	 * underlying devices does not support it.
778 	 */
779 	if (!(b->features & BLK_FEAT_NOWAIT))
780 		t->features &= ~BLK_FEAT_NOWAIT;
781 	if (!(b->features & BLK_FEAT_POLL))
782 		t->features &= ~BLK_FEAT_POLL;
783 
784 	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
785 
786 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
787 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
788 			b->max_user_sectors);
789 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
790 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
791 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
792 					b->max_write_zeroes_sectors);
793 	t->max_user_wzeroes_unmap_sectors =
794 			min(t->max_user_wzeroes_unmap_sectors,
795 			    b->max_user_wzeroes_unmap_sectors);
796 	t->max_hw_wzeroes_unmap_sectors =
797 			min(t->max_hw_wzeroes_unmap_sectors,
798 			    b->max_hw_wzeroes_unmap_sectors);
799 
800 	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
801 					b->max_hw_zone_append_sectors);
802 
803 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
804 					    b->seg_boundary_mask);
805 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
806 					    b->virt_boundary_mask);
807 
808 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
809 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
810 					       b->max_discard_segments);
811 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
812 						 b->max_integrity_segments);
813 
814 	t->max_segment_size = min_not_zero(t->max_segment_size,
815 					   b->max_segment_size);
816 
817 	alignment = queue_limit_alignment_offset(b, start);
818 
819 	/* Bottom device has different alignment.  Check that it is
820 	 * compatible with the current top alignment.
821 	 */
822 	if (t->alignment_offset != alignment) {
823 
824 		top = max(t->physical_block_size, t->io_min)
825 			+ t->alignment_offset;
826 		bottom = max(b->physical_block_size, b->io_min) + alignment;
827 
828 		/* Verify that top and bottom intervals line up */
829 		if (max(top, bottom) % min(top, bottom)) {
830 			t->flags |= BLK_FLAG_MISALIGNED;
831 			ret = -1;
832 		}
833 	}
834 
835 	t->logical_block_size = max(t->logical_block_size,
836 				    b->logical_block_size);
837 
838 	t->physical_block_size = max(t->physical_block_size,
839 				     b->physical_block_size);
840 
841 	t->io_min = max(t->io_min, b->io_min);
842 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
843 	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
844 
845 	/* Set non-power-of-2 compatible chunk_sectors boundary */
846 	if (b->chunk_sectors)
847 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
848 
849 	/* Physical block size a multiple of the logical block size? */
850 	if (t->physical_block_size & (t->logical_block_size - 1)) {
851 		t->physical_block_size = t->logical_block_size;
852 		t->flags |= BLK_FLAG_MISALIGNED;
853 		ret = -1;
854 	}
855 
856 	/* Minimum I/O a multiple of the physical block size? */
857 	if (t->io_min & (t->physical_block_size - 1)) {
858 		t->io_min = t->physical_block_size;
859 		t->flags |= BLK_FLAG_MISALIGNED;
860 		ret = -1;
861 	}
862 
863 	/* Optimal I/O a multiple of the physical block size? */
864 	if (t->io_opt & (t->physical_block_size - 1)) {
865 		t->io_opt = 0;
866 		t->flags |= BLK_FLAG_MISALIGNED;
867 		ret = -1;
868 	}
869 
870 	/* chunk_sectors a multiple of the physical block size? */
871 	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
872 		t->chunk_sectors = 0;
873 		t->flags |= BLK_FLAG_MISALIGNED;
874 		ret = -1;
875 	}
876 
877 	/* Find lowest common alignment_offset */
878 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
879 		% max(t->physical_block_size, t->io_min);
880 
881 	/* Verify that new alignment_offset is on a logical block boundary */
882 	if (t->alignment_offset & (t->logical_block_size - 1)) {
883 		t->flags |= BLK_FLAG_MISALIGNED;
884 		ret = -1;
885 	}
886 
887 	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
888 	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
889 	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
890 
891 	/* Discard alignment and granularity */
892 	if (b->discard_granularity) {
893 		alignment = queue_limit_discard_alignment(b, start);
894 
895 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
896 						      b->max_discard_sectors);
897 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
898 							 b->max_hw_discard_sectors);
899 		t->discard_granularity = max(t->discard_granularity,
900 					     b->discard_granularity);
901 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
902 			t->discard_granularity;
903 	}
904 	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
905 						   b->max_secure_erase_sectors);
906 	t->zone_write_granularity = max(t->zone_write_granularity,
907 					b->zone_write_granularity);
908 	if (!(t->features & BLK_FEAT_ZONED)) {
909 		t->zone_write_granularity = 0;
910 		t->max_zone_append_sectors = 0;
911 	}
912 	blk_stack_atomic_writes_limits(t, b, start);
913 
914 	return ret;
915 }
916 EXPORT_SYMBOL(blk_stack_limits);
917 
918 /**
919  * queue_limits_stack_bdev - adjust queue_limits for stacked devices
920  * @t:	the stacking driver limits (top device)
921  * @bdev:  the underlying block device (bottom)
922  * @offset:  offset to beginning of data within component device
923  * @pfx: prefix to use for warnings logged
924  *
925  * Description:
926  *    This function is used by stacking drivers like MD and DM to ensure
927  *    that all component devices have compatible block sizes and
928  *    alignments.  The stacking driver must provide a queue_limits
929  *    struct (top) and then iteratively call the stacking function for
930  *    all component (bottom) devices.  The stacking function will
931  *    attempt to combine the values and ensure proper alignment.
932  */
933 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
934 		sector_t offset, const char *pfx)
935 {
936 	if (blk_stack_limits(t, bdev_limits(bdev),
937 			get_start_sect(bdev) + offset))
938 		pr_notice("%s: Warning: Device %pg is misaligned\n",
939 			pfx, bdev);
940 }
941 EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
942 
943 /**
944  * queue_limits_stack_integrity - stack integrity profile
945  * @t: target queue limits
946  * @b: base queue limits
947  *
948  * Check if the integrity profile in @b can be stacked into the
949  * target @t.  Stacking is possible if either:
950  *
951  *   a) @t does not have any integrity information stacked into it yet
952  *   b) the integrity profile in @b is identical to the one in @t
953  *
954  * If @b can be stacked into @t, return %true.  Else return %false and clear the
955  * integrity information in @t.
956  */
957 bool queue_limits_stack_integrity(struct queue_limits *t,
958 		struct queue_limits *b)
959 {
960 	struct blk_integrity *ti = &t->integrity;
961 	struct blk_integrity *bi = &b->integrity;
962 
963 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
964 		return true;
965 
966 	if (ti->flags & BLK_INTEGRITY_STACKED) {
967 		if (ti->metadata_size != bi->metadata_size)
968 			goto incompatible;
969 		if (ti->interval_exp != bi->interval_exp)
970 			goto incompatible;
971 		if (ti->tag_size != bi->tag_size)
972 			goto incompatible;
973 		if (ti->csum_type != bi->csum_type)
974 			goto incompatible;
975 		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
976 		    (bi->flags & BLK_INTEGRITY_REF_TAG))
977 			goto incompatible;
978 	} else {
979 		ti->flags = BLK_INTEGRITY_STACKED;
980 		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
981 			     (bi->flags & BLK_INTEGRITY_REF_TAG);
982 		ti->csum_type = bi->csum_type;
983 		ti->metadata_size = bi->metadata_size;
984 		ti->pi_offset = bi->pi_offset;
985 		ti->interval_exp = bi->interval_exp;
986 		ti->tag_size = bi->tag_size;
987 	}
988 	return true;
989 
990 incompatible:
991 	memset(ti, 0, sizeof(*ti));
992 	return false;
993 }
994 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
995 
996 /**
997  * blk_set_queue_depth - tell the block layer about the device queue depth
998  * @q:		the request queue for the device
999  * @depth:		queue depth
1000  *
1001  */
1002 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
1003 {
1004 	q->queue_depth = depth;
1005 	rq_qos_queue_depth_changed(q);
1006 }
1007 EXPORT_SYMBOL(blk_set_queue_depth);
1008 
1009 int bdev_alignment_offset(struct block_device *bdev)
1010 {
1011 	struct request_queue *q = bdev_get_queue(bdev);
1012 
1013 	if (q->limits.flags & BLK_FLAG_MISALIGNED)
1014 		return -1;
1015 	if (bdev_is_partition(bdev))
1016 		return queue_limit_alignment_offset(&q->limits,
1017 				bdev->bd_start_sect);
1018 	return q->limits.alignment_offset;
1019 }
1020 EXPORT_SYMBOL_GPL(bdev_alignment_offset);
1021 
1022 unsigned int bdev_discard_alignment(struct block_device *bdev)
1023 {
1024 	struct request_queue *q = bdev_get_queue(bdev);
1025 
1026 	if (bdev_is_partition(bdev))
1027 		return queue_limit_discard_alignment(&q->limits,
1028 				bdev->bd_start_sect);
1029 	return q->limits.discard_alignment;
1030 }
1031 EXPORT_SYMBOL_GPL(bdev_discard_alignment);
1032