// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/random.h>

#define DM_MSG_PREFIX "flakey"

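/*
 * Denominator for the random_read_corrupt/random_write_corrupt feature
 * arguments: an argument of N gives each bio an N-in-10^9 chance of
 * being corrupted.
 */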
#define PROBABILITY_BASE	1000000000

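/*
 * True when the bio carries every flag in corrupt_bio_flags; a mask of 0
 * matches every bio.
 */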
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned int up_interval;
	unsigned int down_interval;
	unsigned long flags;
	unsigned int corrupt_bio_byte;
	unsigned int corrupt_bio_rw;
	unsigned int corrupt_bio_value;
	blk_opf_t corrupt_bio_flags;
	unsigned int random_read_corrupt;
	unsigned int random_write_corrupt;
};

enum feature_flag_bits {
	ERROR_READS,
	DROP_WRITES,
	ERROR_WRITES
};

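/*
 * Per-bio state used by flakey_end_io() to corrupt READ data: whether the
 * bio is eligible for corruption, and the iterator saved before the bio was
 * remapped and advanced.
 */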
struct per_bio_data {
	bool bio_can_corrupt;
	struct bvec_iter saved_iter;
};

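/*
 * Parse the optional feature arguments.  If none are supplied, fall back to
 * erroring all reads and writes while the device is down.
 */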
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r = 0;
	unsigned int argc = 0;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 11, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
		{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
	};

	if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
		return r;

	/* No feature arguments supplied. */
	if (!argc)
		goto error_all_io;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * error_reads
		 */
		if (!strcasecmp(arg_name, "error_reads")) {
			if (test_and_set_bit(ERROR_READS, &fc->flags)) {
				ti->error = "Feature error_reads duplicated";
				return -EINVAL;
			}
			continue;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (fc->corrupt_bio_byte) {
				ti->error = "Feature corrupt_bio_byte duplicated";
				return -EINVAL;
			} else if (argc < 4) {
				ti->error = "Feature corrupt_bio_byte requires 4 parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
				     sizeof(unsigned int));
			r = dm_read_arg(_args + 3, as,
				(__force unsigned int *)&fc->corrupt_bio_flags,
				&ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_read_corrupt")) {
			if (fc->random_read_corrupt) {
				ti->error = "Feature random_read_corrupt duplicated";
				return -EINVAL;
			} else if (!argc) {
				ti->error = "Feature random_read_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_read_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_write_corrupt")) {
			if (fc->random_write_corrupt) {
				ti->error = "Feature random_write_corrupt duplicated";
				return -EINVAL;
			} else if (!argc) {
				ti->error = "Feature random_write_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_write_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) &&
	    (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
		ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) &&
		   (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
		ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	} else if (test_bit(ERROR_READS, &fc->flags) &&
		   (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
		ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
		return -EINVAL;
	}

	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
	    !fc->random_read_corrupt && !fc->random_write_corrupt) {
error_all_io:
		set_bit(ERROR_WRITES, &fc->flags);
		set_bit(ERROR_READS, &fc->flags);
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [error_reads]
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *     [random_read_corrupt <probability>]
 *     [random_write_corrupt <probability>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 *   probability is the expected number of corrupted bios per
 *   PROBABILITY_BASE (10^9) bios.
 *
 *   If no feature args are supplied, all I/O is errored while the
 *   device is down.
 */
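/*
 * Example (hypothetical device and sizes): create a 200 MiB flakey mapping
 * that passes I/O through for 3 seconds, then errors all I/O for 1 second,
 * repeating (no feature args, so the default of erroring reads and writes
 * applies while down):
 *
 *   dmsetup create flakey-test --table '0 409600 flakey /dev/sdX 0 3 1'
 */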
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio_set_dev(bio, fc->dev->bdev);
	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
			       unsigned char corrupt_bio_value,
			       struct bvec_iter start)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	__bio_for_each_segment(bvec, bio, iter, start) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			unsigned char *segment = bvec_kmap_local(&bvec);
			segment[corrupt_bio_byte] = corrupt_bio_value;
			kunmap_local(segment);
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, corrupt_bio_value, corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)start.bi_sector,
				start.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

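/*
 * Corrupt the user-chosen byte: corrupt_bio_byte is 1-based in the table
 * syntax, so convert it to a 0-based offset here.
 */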
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
			     struct bvec_iter start)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
}

static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
	unsigned int corrupt_byte;
	unsigned char corrupt_value;

	corrupt_byte = get_random_u32() % start.bi_size;
	corrupt_value = get_random_u8();

	corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
}

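/*
 * Free a bio built by clone_bio(): drop the folios backing its payload and
 * then the bio itself.
 */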
static void clone_free(struct bio *clone)
{
	struct folio_iter fi;

	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
		bio_for_each_folio_all(fi, clone)
			folio_put(fi.folio);
	}

	bio_uninit(clone);
	kfree(clone);
}

static void clone_endio(struct bio *clone)
{
	struct bio *bio = clone->bi_private;
	bio->bi_status = clone->bi_status;
	clone_free(clone);
	bio_endio(bio);
}

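/*
 * Write corruption must not modify the caller's pages, so copy the bio's
 * payload into freshly allocated pages and submit the copy instead.  The
 * original bio is completed from clone_endio() once the clone finishes.
 */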
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
{
	struct bio *clone;
	unsigned size, remaining_size, nr_iovecs, order;
	struct bvec_iter iter = bio->bi_iter;

	if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
		dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);

	size = bio->bi_iter.bi_size;
	nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	clone = bio_kmalloc(nr_iovecs, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	if (!clone)
		return NULL;

	bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);

	clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
	clone->bi_private = bio;
	clone->bi_end_io = clone_endio;

	remaining_size = size;

	order = MAX_PAGE_ORDER;
	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add, to_copy;
		unsigned char *virt;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		order = min(order, remaining_order);

retry_alloc_pages:
		pages = alloc_pages(GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, order);
		if (unlikely(!pages)) {
			if (order) {
				order--;
				goto retry_alloc_pages;
			}
			clone_free(clone);
			return NULL;
		}
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);

		virt = page_to_virt(pages);
		to_copy = size_to_add;
		do {
			struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
			unsigned this_step = min(bvec.bv_len, to_copy);
			void *map = bvec_kmap_local(&bvec);
			memcpy(virt, map, this_step);
			kunmap_local(map);

			bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
			to_copy -= this_step;
			virt += this_step;
		} while (to_copy);

		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	return clone;
}

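/*
 * While the device is up, remap the bio to the underlying device.  While it
 * is down: error reads if requested, drop or error writes if requested,
 * corrupt eligible writes via a cloned bio, and leave read corruption to
 * flakey_end_io().
 */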
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned int elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_can_corrupt = false;

	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		bool corrupt_fixed, corrupt_random;

		if (bio_has_data(bio)) {
			pb->bio_can_corrupt = true;
			pb->saved_iter = bio->bi_iter;
		}

		/*
		 * If ERROR_READS isn't set, flakey_end_io() will decide
		 * whether the read data should be corrupted.
		 */
		if (bio_data_dir(bio) == READ) {
			if (test_bit(ERROR_READS, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		if (!pb->bio_can_corrupt)
			goto map_bio;
		/*
		 * Corrupt matching writes.
		 */
		corrupt_fixed = false;
		corrupt_random = false;
		if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_fixed = true;
		}
		if (fc->random_write_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_write_corrupt)
				corrupt_random = true;
		}
		if (corrupt_fixed || corrupt_random) {
			struct bio *clone = clone_bio(ti, fc, bio);
			if (clone) {
				if (corrupt_fixed)
					corrupt_bio_data(clone, fc,
							 clone->bi_iter);
				if (corrupt_random)
					corrupt_bio_random(clone,
							   clone->bi_iter);
				submit_bio(clone);
				return DM_MAPIO_SUBMITTED;
			}
		}
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

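/*
 * Read data only exists after the I/O completes, so corruption of READs
 * (fixed-byte and/or random) is applied here, using the iterator saved in
 * flakey_map() before the bio was remapped.
 */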
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc, pb->saved_iter);
			}
		}
		if (fc->random_read_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_read_corrupt)
				corrupt_bio_random(bio, pb->saved_iter);
		}
	}

	return DM_ENDIO_DONE;
}

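/*
 * STATUSTYPE_TABLE reconstructs the constructor arguments; the count emitted
 * before the feature args is the number of words that follow it.
 */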
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned int error_reads, drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		error_reads = test_bit(ERROR_READS, &fc->flags);
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		DMEMIT(" %u", error_reads + drop_writes + error_writes +
			(fc->corrupt_bio_byte > 0) * 5 +
			(fc->random_read_corrupt > 0) * 2 +
			(fc->random_write_corrupt > 0) * 2);

		if (error_reads)
			DMEMIT(" error_reads");
		if (drop_writes)
			DMEMIT(" drop_writes");
		else if (error_writes)
			DMEMIT(" error_writes");

		if (fc->corrupt_bio_byte)
			DMEMIT(" corrupt_bio_byte %u %c %u %u",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		if (fc->random_read_corrupt > 0)
			DMEMIT(" random_read_corrupt %u", fc->random_read_corrupt);
		if (fc->random_write_corrupt > 0)
			DMEMIT(" random_write_corrupt %u", fc->random_write_corrupt);

		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
				unsigned int cmd, unsigned long arg,
				bool *forward)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return 0;
}

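/*
 * Forward zone reports to the underlying device, remapping the requested
 * sector by the target's start offset.
 */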
#ifdef CONFIG_BLK_DEV_ZONED
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;

	return dm_report_zones(fc->dev->bdev, fc->start,
			       flakey_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define flakey_report_zones NULL
#endif

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = flakey_report_zones,
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
module_dm(flakey);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");