xref: /linux/block/t10-pi.c (revision 7fe6ac157b7e15c8976bd62ad7cb98e248884e83)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * t10_pi.c - Functions for generating and verifying T10 Protection
4  *	      Information.
5  */
6 
7 #include <linux/t10-pi.h>
8 #include <linux/blk-integrity.h>
9 #include <linux/crc-t10dif.h>
10 #include <linux/crc64.h>
11 #include <net/checksum.h>
12 #include <linux/unaligned.h>
13 #include "blk.h"
14 
15 #define APP_TAG_ESCAPE 0xffff
16 #define REF_TAG_ESCAPE 0xffffffff
17 
/*
 * This union is used for onstack allocations when the pi field is split across
 * segments. blk_validate_integrity_limits() guarantees pi_tuple_size matches
 * the sizeof one of these two types.
 */
union pi_tuple {
	struct crc64_pi_tuple	crc64_pi;	/* 64-bit CRC (NVMe extended PI) */
	struct t10_pi_tuple	t10_pi;		/* 16-bit CRC / IP checksum PI */
};
27 
/* State shared by the generate/verify walk over a bio's data intervals. */
struct blk_integrity_iter {
	struct bio			*bio;
	struct bio_integrity_payload	*bip;
	struct blk_integrity		*bi;		/* integrity profile in use */
	struct bvec_iter		data_iter;	/* position in the data pages */
	struct bvec_iter		prot_iter;	/* position in the PI buffer */
	unsigned int			interval_remaining; /* bytes left in interval */
	u64				seed;		/* expected ref tag for interval */
	u64				csum;		/* running guard checksum */
};
38 
blk_calculate_guard(struct blk_integrity_iter * iter,void * data,unsigned int len)39 static void blk_calculate_guard(struct blk_integrity_iter *iter, void *data,
40 				unsigned int len)
41 {
42 	switch (iter->bi->csum_type) {
43 	case BLK_INTEGRITY_CSUM_CRC64:
44 		iter->csum = crc64_nvme(iter->csum, data, len);
45 		break;
46 	case BLK_INTEGRITY_CSUM_CRC:
47 		iter->csum = crc_t10dif_update(iter->csum, data, len);
48 		break;
49 	case BLK_INTEGRITY_CSUM_IP:
50 		iter->csum = (__force u32)csum_partial(data, len,
51 						(__force __wsum)iter->csum);
52 		break;
53 	default:
54 		WARN_ON_ONCE(1);
55 		iter->csum = U64_MAX;
56 		break;
57 	}
58 }
59 
blk_integrity_csum_finish(struct blk_integrity_iter * iter)60 static void blk_integrity_csum_finish(struct blk_integrity_iter *iter)
61 {
62 	switch (iter->bi->csum_type) {
63 	case BLK_INTEGRITY_CSUM_IP:
64 		iter->csum = (__force u16)csum_fold((__force __wsum)iter->csum);
65 		break;
66 	default:
67 		break;
68 	}
69 }
70 
71 /*
72  * Update the csum for formats that have metadata padding in front of the data
73  * integrity field
74  */
blk_integrity_csum_offset(struct blk_integrity_iter * iter)75 static void blk_integrity_csum_offset(struct blk_integrity_iter *iter)
76 {
77 	unsigned int offset = iter->bi->pi_offset;
78 	struct bio_vec *bvec = iter->bip->bip_vec;
79 
80 	while (offset > 0) {
81 		struct bio_vec pbv = bvec_iter_bvec(bvec, iter->prot_iter);
82 		unsigned int len = min(pbv.bv_len, offset);
83 		void *prot_buf = bvec_kmap_local(&pbv);
84 
85 		blk_calculate_guard(iter, prot_buf, len);
86 		kunmap_local(prot_buf);
87 		offset -= len;
88 		bvec_iter_advance_single(bvec, &iter->prot_iter, len);
89 	}
90 	blk_integrity_csum_finish(iter);
91 }
92 
blk_integrity_copy_from_tuple(struct bio_integrity_payload * bip,struct bvec_iter * iter,void * tuple,unsigned int tuple_size)93 static void blk_integrity_copy_from_tuple(struct bio_integrity_payload *bip,
94 					  struct bvec_iter *iter, void *tuple,
95 					  unsigned int tuple_size)
96 {
97 	while (tuple_size) {
98 		struct bio_vec pbv = bvec_iter_bvec(bip->bip_vec, *iter);
99 		unsigned int len = min(tuple_size, pbv.bv_len);
100 		void *prot_buf = bvec_kmap_local(&pbv);
101 
102 		memcpy(prot_buf, tuple, len);
103 		kunmap_local(prot_buf);
104 		bvec_iter_advance_single(bip->bip_vec, iter, len);
105 		tuple_size -= len;
106 		tuple += len;
107 	}
108 }
109 
blk_integrity_copy_to_tuple(struct bio_integrity_payload * bip,struct bvec_iter * iter,void * tuple,unsigned int tuple_size)110 static void blk_integrity_copy_to_tuple(struct bio_integrity_payload *bip,
111 					struct bvec_iter *iter, void *tuple,
112 					unsigned int tuple_size)
113 {
114 	while (tuple_size) {
115 		struct bio_vec pbv = bvec_iter_bvec(bip->bip_vec, *iter);
116 		unsigned int len = min(tuple_size, pbv.bv_len);
117 		void *prot_buf = bvec_kmap_local(&pbv);
118 
119 		memcpy(tuple, prot_buf, len);
120 		kunmap_local(prot_buf);
121 		bvec_iter_advance_single(bip->bip_vec, iter, len);
122 		tuple_size -= len;
123 		tuple += len;
124 	}
125 }
126 
ext_pi_ref_escape(const u8 ref_tag[6])127 static bool ext_pi_ref_escape(const u8 ref_tag[6])
128 {
129 	static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
130 
131 	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
132 }
133 
/*
 * Verify one NVMe 64-bit (extended) PI tuple against the computed guard
 * checksum and the expected 48-bit reference tag.
 *
 * With ref tag checking enabled, an app tag of APP_TAG_ESCAPE disables all
 * checking for this tuple; without it, the ref tag must also be all-ones
 * for the escape to apply.
 */
static blk_status_t blk_verify_ext_pi(struct blk_integrity_iter *iter,
				      struct crc64_pi_tuple *pi)
{
	u64 seed = lower_48_bits(iter->seed);
	u64 guard = get_unaligned_be64(&pi->guard_tag);
	u64 ref = get_unaligned_be48(pi->ref_tag);
	u16 app = get_unaligned_be16(&pi->app_tag);

	if (iter->bi->flags & BLK_INTEGRITY_REF_TAG) {
		/* Escaped tuples skip both ref and guard checking. */
		if (app == APP_TAG_ESCAPE)
			return BLK_STS_OK;
		if (ref != seed) {
			pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
				iter->bio->bi_bdev->bd_disk->disk_name, seed,
				ref);
			return BLK_STS_PROTECTION;
		}
	} else if (app == APP_TAG_ESCAPE && ext_pi_ref_escape(pi->ref_tag)) {
		return BLK_STS_OK;
	}

	if (guard != iter->csum) {
		pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
			iter->bio->bi_bdev->bd_disk->disk_name, iter->seed,
			guard, iter->csum);
		return BLK_STS_PROTECTION;
	}

	return BLK_STS_OK;
}
164 
/*
 * Verify one T10 PI tuple against the computed checksum and the expected
 * 32-bit reference tag. @guard is extracted by the caller since CRC and IP
 * checksum formats store it with different byte ordering.
 */
static blk_status_t blk_verify_pi(struct blk_integrity_iter *iter,
				  struct t10_pi_tuple *pi, u16 guard)
{
	u32 seed = lower_32_bits(iter->seed);
	u32 ref = get_unaligned_be32(&pi->ref_tag);
	u16 app = get_unaligned_be16(&pi->app_tag);

	if (iter->bi->flags & BLK_INTEGRITY_REF_TAG) {
		/* Escaped tuples skip both ref and guard checking. */
		if (app == APP_TAG_ESCAPE)
			return BLK_STS_OK;
		if (ref != seed) {
			pr_err("%s: ref tag error at location %u (rcvd %u)\n",
				iter->bio->bi_bdev->bd_disk->disk_name, seed,
				ref);
			return BLK_STS_PROTECTION;
		}
	} else if (app == APP_TAG_ESCAPE && ref == REF_TAG_ESCAPE) {
		return BLK_STS_OK;
	}

	if (guard != (u16)iter->csum) {
		pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			iter->bio->bi_bdev->bd_disk->disk_name, iter->seed,
			guard, (u16)iter->csum);
		return BLK_STS_PROTECTION;
	}

	return BLK_STS_OK;
}
194 
blk_verify_t10_pi(struct blk_integrity_iter * iter,struct t10_pi_tuple * pi)195 static blk_status_t blk_verify_t10_pi(struct blk_integrity_iter *iter,
196 				      struct t10_pi_tuple *pi)
197 {
198 	u16 guard = get_unaligned_be16(&pi->guard_tag);
199 
200 	return blk_verify_pi(iter, pi, guard);
201 }
202 
blk_verify_ip_pi(struct blk_integrity_iter * iter,struct t10_pi_tuple * pi)203 static blk_status_t blk_verify_ip_pi(struct blk_integrity_iter *iter,
204 				     struct t10_pi_tuple *pi)
205 {
206 	u16 guard = get_unaligned((u16 *)&pi->guard_tag);
207 
208 	return blk_verify_pi(iter, pi, guard);
209 }
210 
blk_integrity_verify(struct blk_integrity_iter * iter,union pi_tuple * tuple)211 static blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter,
212 					 union pi_tuple *tuple)
213 {
214 	switch (iter->bi->csum_type) {
215 	case BLK_INTEGRITY_CSUM_CRC64:
216 		return blk_verify_ext_pi(iter, &tuple->crc64_pi);
217 	case BLK_INTEGRITY_CSUM_CRC:
218 		return blk_verify_t10_pi(iter, &tuple->t10_pi);
219 	case BLK_INTEGRITY_CSUM_IP:
220 		return blk_verify_ip_pi(iter, &tuple->t10_pi);
221 	default:
222 		return BLK_STS_OK;
223 	}
224 }
225 
blk_set_ext_pi(struct blk_integrity_iter * iter,struct crc64_pi_tuple * pi)226 static void blk_set_ext_pi(struct blk_integrity_iter *iter,
227 			   struct crc64_pi_tuple *pi)
228 {
229 	put_unaligned_be64(iter->csum, &pi->guard_tag);
230 	put_unaligned_be16(0, &pi->app_tag);
231 	put_unaligned_be48(iter->seed, &pi->ref_tag);
232 }
233 
/*
 * Fill a T10 PI tuple; @csum is pre-formatted by the caller since CRC and
 * IP checksum formats use different guard tag byte ordering.
 */
static void blk_set_pi(struct blk_integrity_iter *iter,
		       struct t10_pi_tuple *pi, __be16 csum)
{
	/* Generated tuples carry no application tag. */
	put_unaligned_be16(0, &pi->app_tag);
	put_unaligned_be32(iter->seed, &pi->ref_tag);
	put_unaligned(csum, &pi->guard_tag);
}
241 
blk_set_t10_pi(struct blk_integrity_iter * iter,struct t10_pi_tuple * pi)242 static void blk_set_t10_pi(struct blk_integrity_iter *iter,
243 			   struct t10_pi_tuple *pi)
244 {
245 	blk_set_pi(iter, pi, cpu_to_be16((u16)iter->csum));
246 }
247 
blk_set_ip_pi(struct blk_integrity_iter * iter,struct t10_pi_tuple * pi)248 static void blk_set_ip_pi(struct blk_integrity_iter *iter,
249 			  struct t10_pi_tuple *pi)
250 {
251 	blk_set_pi(iter, pi, (__force __be16)(u16)iter->csum);
252 }
253 
blk_integrity_set(struct blk_integrity_iter * iter,union pi_tuple * tuple)254 static void blk_integrity_set(struct blk_integrity_iter *iter,
255 			      union pi_tuple *tuple)
256 {
257 	switch (iter->bi->csum_type) {
258 	case BLK_INTEGRITY_CSUM_CRC64:
259 		return blk_set_ext_pi(iter, &tuple->crc64_pi);
260 	case BLK_INTEGRITY_CSUM_CRC:
261 		return blk_set_t10_pi(iter, &tuple->t10_pi);
262 	case BLK_INTEGRITY_CSUM_IP:
263 		return blk_set_ip_pi(iter, &tuple->t10_pi);
264 	default:
265 		WARN_ON_ONCE(1);
266 		return;
267 	}
268 }
269 
/*
 * Handle one completed protection interval: verify or generate the PI tuple
 * covering the data just checksummed, then reset the iterator state for the
 * next interval.
 *
 * When the tuple is fully contained in one protection bvec it is kmapped and
 * accessed in place; otherwise it is staged through an on-stack pi_tuple
 * (gathered for verify, scattered back for generate).
 */
static blk_status_t blk_integrity_interval(struct blk_integrity_iter *iter,
					   bool verify)
{
	blk_status_t ret = BLK_STS_OK;
	union pi_tuple tuple;
	void *ptuple = &tuple;
	struct bio_vec pbv;

	/* Checksum any metadata padding in front of the PI field. */
	blk_integrity_csum_offset(iter);
	pbv = bvec_iter_bvec(iter->bip->bip_vec, iter->prot_iter);
	if (pbv.bv_len >= iter->bi->pi_tuple_size) {
		/* Contiguous tuple: operate on the mapped buffer directly. */
		ptuple = bvec_kmap_local(&pbv);
		bvec_iter_advance_single(iter->bip->bip_vec, &iter->prot_iter,
				iter->bi->metadata_size - iter->bi->pi_offset);
	} else if (verify) {
		/* Split tuple: gather a contiguous copy to verify against. */
		blk_integrity_copy_to_tuple(iter->bip, &iter->prot_iter,
				ptuple, iter->bi->pi_tuple_size);
	}

	if (verify)
		ret = blk_integrity_verify(iter, ptuple);
	else
		blk_integrity_set(iter, ptuple);

	if (likely(ptuple != &tuple)) {
		kunmap_local(ptuple);
	} else if (!verify) {
		/* Scatter the generated tuple back across the segments. */
		blk_integrity_copy_from_tuple(iter->bip, &iter->prot_iter,
				ptuple, iter->bi->pi_tuple_size);
	}

	/* Arm the iterator for the next interval. */
	iter->interval_remaining = 1 << iter->bi->interval_exp;
	iter->csum = 0;
	iter->seed++;
	return ret;
}
306 
/*
 * Walk every data interval of @bio, checksumming the data and then either
 * verifying (@verify) or generating the matching PI tuple.
 *
 * Data and protection buffers are iterated independently since an interval
 * may straddle data bvec boundaries; interval_remaining tracks how much of
 * the current interval is still unchecksummed.
 */
static blk_status_t blk_integrity_iterate(struct bio *bio,
					  struct bvec_iter *data_iter,
					  bool verify)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter = {
		.bio = bio,
		.bip = bip,
		.bi = bi,
		.data_iter = *data_iter,
		.prot_iter = bip->bip_iter,
		.interval_remaining = 1 << bi->interval_exp,
		.seed = data_iter->bi_sector,	/* initial ref tag seed */
		.csum = 0,
	};
	blk_status_t ret = BLK_STS_OK;

	while (iter.data_iter.bi_size && ret == BLK_STS_OK) {
		struct bio_vec bv = bvec_iter_bvec(iter.bio->bi_io_vec,
						   iter.data_iter);
		void *kaddr = bvec_kmap_local(&bv);
		void *data = kaddr;
		unsigned int len;

		bvec_iter_advance_single(iter.bio->bi_io_vec, &iter.data_iter,
					 bv.bv_len);
		while (bv.bv_len && ret == BLK_STS_OK) {
			len = min(iter.interval_remaining, bv.bv_len);
			blk_calculate_guard(&iter, data, len);
			bv.bv_len -= len;
			data += len;
			iter.interval_remaining -= len;
			/* Interval fully summed: process its PI tuple. */
			if (!iter.interval_remaining)
				ret = blk_integrity_interval(&iter, verify);
		}
		kunmap_local(kaddr);
	}

	return ret;
}
348 
/*
 * bio_integrity_generate - fill in protection information for @bio
 * @bio: bio with an attached integrity payload to populate
 *
 * Compute the guard/ref tags for every data interval of @bio and write the
 * resulting PI tuples into the bio's integrity buffer. Profiles without one
 * of the three PI checksum types are left untouched.
 */
void bio_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_CRC64:
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		blk_integrity_iterate(bio, &bio->bi_iter, false);
		break;
	default:
		break;
	}
}
363 
/*
 * bio_integrity_verify - check the protection information of a completed @bio
 * @bio: completed read bio carrying integrity metadata
 * @saved_iter: the bio's data iterator as saved before submission
 *
 * Recompute the checksums over the data and compare them against the PI
 * tuples returned by the device. Returns BLK_STS_PROTECTION on mismatch,
 * BLK_STS_OK otherwise (including for non-PI checksum types).
 */
blk_status_t bio_integrity_verify(struct bio *bio, struct bvec_iter *saved_iter)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_CRC64:
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		return blk_integrity_iterate(bio, saved_iter, true);
	default:
		break;
	}

	return BLK_STS_OK;
}
379 
380 /*
381  * Advance @iter past the protection offset for protection formats that
382  * contain front padding on the metadata region.
383  */
blk_pi_advance_offset(struct blk_integrity * bi,struct bio_integrity_payload * bip,struct bvec_iter * iter)384 static void blk_pi_advance_offset(struct blk_integrity *bi,
385 				  struct bio_integrity_payload *bip,
386 				  struct bvec_iter *iter)
387 {
388 	unsigned int offset = bi->pi_offset;
389 
390 	while (offset > 0) {
391 		struct bio_vec bv = mp_bvec_iter_bvec(bip->bip_vec, *iter);
392 		unsigned int len = min(bv.bv_len, offset);
393 
394 		bvec_iter_advance_single(bip->bip_vec, iter, len);
395 		offset -= len;
396 	}
397 }
398 
/*
 * Map the next PI tuple for in-place modification. Returns a kmapped pointer
 * into the protection bvec when the tuple is contiguous, or @tuple filled
 * with a gathered copy when the tuple is split across segments. Must be
 * paired with blk_tuple_remap_end(). @iter is left pointing at the start of
 * the tuple in both cases.
 */
static void *blk_tuple_remap_begin(union pi_tuple *tuple,
				   struct blk_integrity *bi,
				   struct bio_integrity_payload *bip,
				   struct bvec_iter *iter)
{
	struct bvec_iter titer;
	struct bio_vec pbv;

	blk_pi_advance_offset(bi, bip, iter);
	pbv = bvec_iter_bvec(bip->bip_vec, *iter);
	if (likely(pbv.bv_len >= bi->pi_tuple_size))
		return bvec_kmap_local(&pbv);

	/*
	 * We need to preserve the state of the original iter for the
	 * copy_from_tuple at the end, so make a temp iter for here.
	 */
	titer = *iter;
	blk_integrity_copy_to_tuple(bip, &titer, tuple, bi->pi_tuple_size);
	return tuple;
}
420 
/*
 * Finish a blk_tuple_remap_begin(): unmap an in-place tuple, or scatter the
 * staged on-stack copy back across the split protection segments, then
 * advance @iter past this interval's remaining metadata.
 */
static void blk_tuple_remap_end(union pi_tuple *tuple, void *ptuple,
				struct blk_integrity *bi,
				struct bio_integrity_payload *bip,
				struct bvec_iter *iter)
{
	unsigned int len = bi->metadata_size - bi->pi_offset;

	if (likely(ptuple != tuple)) {
		kunmap_local(ptuple);
	} else {
		/* copy_from_tuple already advanced @iter by the tuple size. */
		blk_integrity_copy_from_tuple(bip, iter, ptuple,
				bi->pi_tuple_size);
		len -= bi->pi_tuple_size;
	}

	bvec_iter_advance(bip->bip_vec, iter, len);
}
438 
/*
 * Restore the submitter's virtual ref tag in an ext PI tuple on completion,
 * but only for tuples we previously remapped to the physical tag.
 */
static void blk_set_ext_unmap_ref(struct crc64_pi_tuple *pi, u64 virt,
				  u64 ref_tag)
{
	u64 stored = get_unaligned_be48(&pi->ref_tag);

	if (stored != lower_48_bits(ref_tag) || stored == lower_48_bits(virt))
		return;
	put_unaligned_be48(virt, pi->ref_tag);
}
447 
/*
 * Restore the submitter's virtual ref tag in a T10 PI tuple on completion,
 * but only for tuples we previously remapped to the physical tag.
 */
static void blk_set_t10_unmap_ref(struct t10_pi_tuple *pi, u32 virt,
				  u32 ref_tag)
{
	u32 stored = get_unaligned_be32(&pi->ref_tag);

	if (stored != ref_tag || stored == virt)
		return;
	put_unaligned_be32(virt, &pi->ref_tag);
}
456 
/* Dispatch completion-side ref tag restore to the profile's tuple format. */
static void blk_reftag_remap_complete(struct blk_integrity *bi,
				      union pi_tuple *tuple, u64 virt, u64 ref)
{
	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64) {
		blk_set_ext_unmap_ref(&tuple->crc64_pi, virt, ref);
	} else if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC ||
		   bi->csum_type == BLK_INTEGRITY_CSUM_IP) {
		blk_set_t10_unmap_ref(&tuple->t10_pi, virt, ref);
	} else {
		/* Only PI checksum types carry ref tags. */
		WARN_ON_ONCE(1);
	}
}
473 
/*
 * Swap the submitter's virtual ref tag for the physical one in an ext PI
 * tuple before issue, but only when the tuple still carries the virtual tag.
 */
static void blk_set_ext_map_ref(struct crc64_pi_tuple *pi, u64 virt,
				u64 ref_tag)
{
	u64 stored = get_unaligned_be48(&pi->ref_tag);

	if (stored != lower_48_bits(virt) || stored == ref_tag)
		return;
	put_unaligned_be48(ref_tag, pi->ref_tag);
}
482 
/*
 * Swap the submitter's virtual ref tag for the physical one in a T10 PI
 * tuple before issue, but only when the tuple still carries the virtual tag.
 */
static void blk_set_t10_map_ref(struct t10_pi_tuple *pi, u32 virt, u32 ref_tag)
{
	u32 stored = get_unaligned_be32(&pi->ref_tag);

	if (stored != virt || stored == ref_tag)
		return;
	put_unaligned_be32(ref_tag, &pi->ref_tag);
}
490 
/* Dispatch issue-side ref tag remapping to the profile's tuple format. */
static void blk_reftag_remap_prepare(struct blk_integrity *bi,
				     union pi_tuple *tuple,
				     u64 virt, u64 ref)
{
	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64) {
		blk_set_ext_map_ref(&tuple->crc64_pi, virt, ref);
	} else if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC ||
		   bi->csum_type == BLK_INTEGRITY_CSUM_IP) {
		blk_set_t10_map_ref(&tuple->t10_pi, virt, ref);
	} else {
		/* Only PI checksum types carry ref tags. */
		WARN_ON_ONCE(1);
	}
}
508 
/*
 * Remap (@prep) or restore (!@prep) the reference tags of @bio's PI tuples
 * between the submitter's virtual sector numbers and the device's physical
 * ones, for at most *@intervals intervals starting at physical tag *@ref.
 * Both counters are advanced so the caller can continue with the next bio
 * of the same request.
 */
static void __blk_reftag_remap(struct bio *bio, struct blk_integrity *bi,
			       unsigned *intervals, u64 *ref, bool prep)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bvec_iter iter = bip->bip_iter;
	u64 virt = bip_get_seed(bip);
	union pi_tuple *ptuple;
	union pi_tuple tuple;

	/* Already remapped (e.g. a requeue): just account for its intervals. */
	if (prep && bip->bip_flags & BIP_MAPPED_INTEGRITY) {
		*ref += bio->bi_iter.bi_size >> bi->interval_exp;
		return;
	}

	while (iter.bi_size && *intervals) {
		ptuple = blk_tuple_remap_begin(&tuple, bi, bip, &iter);

		if (prep)
			blk_reftag_remap_prepare(bi, ptuple, virt, *ref);
		else
			blk_reftag_remap_complete(bi, ptuple, virt, *ref);

		blk_tuple_remap_end(&tuple, ptuple, bi, bip, &iter);
		(*intervals)--;
		(*ref)++;
		virt++;
	}

	/* Mark the bio so a requeue does not remap it twice. */
	if (prep)
		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
540 
/*
 * Remap (@prep) or restore (!@prep) the ref tags for the first @nr_bytes of
 * @rq across all of its bios. A no-op for profiles without ref tag checking.
 */
static void blk_integrity_remap(struct request *rq, unsigned int nr_bytes,
				bool prep)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	/* Physical ref tag of the request's first protection interval. */
	u64 ref = blk_rq_pos(rq) >> (bi->interval_exp - SECTOR_SHIFT);
	unsigned intervals = nr_bytes >> bi->interval_exp;
	struct bio *bio;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	__rq_for_each_bio(bio, rq) {
		__blk_reftag_remap(bio, bi, &intervals, &ref, prep);
		if (!intervals)
			break;
	}
}
558 
blk_integrity_prepare(struct request * rq)559 void blk_integrity_prepare(struct request *rq)
560 {
561 	blk_integrity_remap(rq, blk_rq_bytes(rq), true);
562 }
563 
blk_integrity_complete(struct request * rq,unsigned int nr_bytes)564 void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
565 {
566 	blk_integrity_remap(rq, nr_bytes, false);
567 }
568