1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Zoned block device handling
4  *
5  * Copyright (c) 2015, Hannes Reinecke
6  * Copyright (c) 2015, SUSE Linux GmbH
7  *
8  * Copyright (c) 2016, Damien Le Moal
9  * Copyright (c) 2016, Western Digital
10  * Copyright (c) 2024, Western Digital Corporation or its affiliates.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/blkdev.h>
15 #include <linux/blk-mq.h>
16 #include <linux/spinlock.h>
17 #include <linux/refcount.h>
18 #include <linux/mempool.h>
19 
20 #include "blk.h"
21 #include "blk-mq-sched.h"
22 #include "blk-mq-debugfs.h"
23 
24 #define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
25 static const char *const zone_cond_name[] = {
26 	ZONE_COND_NAME(NOT_WP),
27 	ZONE_COND_NAME(EMPTY),
28 	ZONE_COND_NAME(IMP_OPEN),
29 	ZONE_COND_NAME(EXP_OPEN),
30 	ZONE_COND_NAME(CLOSED),
31 	ZONE_COND_NAME(READONLY),
32 	ZONE_COND_NAME(FULL),
33 	ZONE_COND_NAME(OFFLINE),
34 };
35 #undef ZONE_COND_NAME
36 
37 /*
38  * Per-zone write plug.
39  * @node: hlist_node structure for managing the plug using a hash table.
40  * @ref: Zone write plug reference counter. A zone write plug reference is
41  *       always at least 1 when the plug is hashed in the disk plug hash table.
42  *       The reference is incremented whenever a new BIO needing plugging is
43  *       submitted and when a function needs to manipulate a plug. The
44  *       reference count is decremented whenever a plugged BIO completes and
45  *       when a function that referenced the plug returns. The initial
 46  *       reference is dropped whenever the zone of the zone write plug is
 47  *       reset or finished, or when the zone becomes full (the last write
 48  *       BIO to the zone completes).
49  * @lock: Spinlock to atomically manipulate the plug.
50  * @flags: Flags indicating the plug state.
51  * @zone_no: The number of the zone the plug is managing.
52  * @wp_offset: The zone write pointer location relative to the start of the zone
53  *             as a number of 512B sectors.
54  * @bio_list: The list of BIOs that are currently plugged.
55  * @bio_work: Work struct to handle issuing of plugged BIOs
56  * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
57  * @disk: The gendisk the plug belongs to.
58  */
59 struct blk_zone_wplug {
60 	struct hlist_node	node;
61 	refcount_t		ref;
62 	spinlock_t		lock;
63 	unsigned int		flags;
64 	unsigned int		zone_no;
65 	unsigned int		wp_offset;
66 	struct bio_list		bio_list;
67 	struct work_struct	bio_work;
68 	struct rcu_head		rcu_head;
69 	struct gendisk		*disk;
70 };
71 
72 /*
73  * Zone write plug flags bits:
74  *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
75  *    that is, that write BIOs are being throttled due to a write BIO already
76  *    being executed or the zone write plug bio list is not empty.
77  *  - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
78  *    write pointer offset and need to update it.
79  *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
80  *    from the disk hash table and that the initial reference to the zone
81  *    write plug set when the plug was first added to the hash table has been
 82  *    dropped. This flag is set when a zone is reset, finished or becomes full,
 83  *    to prevent new references to the zone write plug from being taken for
84  *    newly incoming BIOs. A zone write plug flagged with this flag will be
85  *    freed once all remaining references from BIOs or functions are dropped.
86  */
87 #define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
88 #define BLK_ZONE_WPLUG_NEED_WP_UPDATE	(1U << 1)
89 #define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)
90 
91 /**
92  * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
93  * @zone_cond: BLK_ZONE_COND_XXX.
94  *
95  * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
 96  * into string format. Useful for debugging and tracing zone conditions. For
97  * invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
98  */
99 const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
100 {
101 	static const char *zone_cond_str = "UNKNOWN";
102 
103 	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
104 		zone_cond_str = zone_cond_name[zone_cond];
105 
106 	return zone_cond_str;
107 }
108 EXPORT_SYMBOL_GPL(blk_zone_cond_str);
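
/*
 * Illustrative use (sketch, not part of this file): printing a zone
 * condition from a report_zones_cb callback.
 *
 *	pr_debug("zone %u: cond %s\n", idx, blk_zone_cond_str(zone->cond));
 */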
109 
110 struct disk_report_zones_cb_args {
111 	struct gendisk	*disk;
112 	report_zones_cb	user_cb;
113 	void		*user_data;
114 };
115 
116 static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
117 					   struct blk_zone *zone);
118 
119 static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
120 				void *data)
121 {
122 	struct disk_report_zones_cb_args *args = data;
123 	struct gendisk *disk = args->disk;
124 
125 	if (disk->zone_wplugs_hash)
126 		disk_zone_wplug_sync_wp_offset(disk, zone);
127 
128 	if (!args->user_cb)
129 		return 0;
130 
131 	return args->user_cb(zone, idx, args->user_data);
132 }
133 
134 /**
135  * blkdev_report_zones - Get zones information
136  * @bdev:	Target block device
137  * @sector:	Sector from which to report zones
138  * @nr_zones:	Maximum number of zones to report
139  * @cb:		Callback function called for each reported zone
140  * @data:	Private data for the callback
141  *
142  * Description:
143  *    Get zone information starting from the zone containing @sector for at most
144  *    @nr_zones, and call @cb for each zone reported by the device.
145  *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
146  *    constant can be passed to @nr_zones.
147  *    Returns the number of zones reported by the device, or a negative errno
148  *    value in case of failure.
149  *
150  *    Note: The caller must use memalloc_noXX_save/restore() calls to control
151  *    memory allocations done within this function.
152  */
153 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
154 			unsigned int nr_zones, report_zones_cb cb, void *data)
155 {
156 	struct gendisk *disk = bdev->bd_disk;
157 	sector_t capacity = get_capacity(disk);
158 	struct disk_report_zones_cb_args args = {
159 		.disk = disk,
160 		.user_cb = cb,
161 		.user_data = data,
162 	};
163 
164 	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
165 		return -EOPNOTSUPP;
166 
167 	if (!nr_zones || sector >= capacity)
168 		return 0;
169 
170 	return disk->fops->report_zones(disk, sector, nr_zones,
171 					disk_report_zones_cb, &args);
172 }
173 EXPORT_SYMBOL_GPL(blkdev_report_zones);
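
/*
 * Illustrative example (sketch, hypothetical my_* names): counting the
 * open zones of a device with blkdev_report_zones(). The callback is
 * invoked once per reported zone and may return a negative errno to
 * stop the iteration.
 *
 *	static int my_count_open_cb(struct blk_zone *zone, unsigned int idx,
 *				    void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
 *		    zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      my_count_open_cb, &count);
 */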
174 
175 static int blkdev_zone_reset_all(struct block_device *bdev)
176 {
177 	struct bio bio;
178 
179 	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
180 	return submit_bio_wait(&bio);
181 }
182 
183 /**
184  * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
185  * @bdev:	Target block device
186  * @op:		Operation to be performed on the zones
187  * @sector:	Start sector of the first zone to operate on
188  * @nr_sectors:	Number of sectors, should be at least the length of one zone and
189  *		must be zone size aligned.
190  *
191  * Description:
192  *    Perform the specified operation on the range of zones specified by
193  *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
194  *    is valid, but the specified range should not contain conventional zones.
195  *    The operation to execute on each zone can be a zone reset, open, close
196  *    or finish request.
197  */
198 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
199 		     sector_t sector, sector_t nr_sectors)
200 {
201 	sector_t zone_sectors = bdev_zone_sectors(bdev);
202 	sector_t capacity = bdev_nr_sectors(bdev);
203 	sector_t end_sector = sector + nr_sectors;
204 	struct bio *bio = NULL;
205 	int ret = 0;
206 
207 	if (!bdev_is_zoned(bdev))
208 		return -EOPNOTSUPP;
209 
210 	if (bdev_read_only(bdev))
211 		return -EPERM;
212 
213 	if (!op_is_zone_mgmt(op))
214 		return -EOPNOTSUPP;
215 
216 	if (end_sector <= sector || end_sector > capacity)
217 		/* Out of range */
218 		return -EINVAL;
219 
220 	/* Check alignment (handle eventual smaller last zone) */
221 	if (!bdev_is_zone_start(bdev, sector))
222 		return -EINVAL;
223 
224 	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
225 		return -EINVAL;
226 
227 	/*
228 	 * In the case of a zone reset operation over all zones, use
229 	 * REQ_OP_ZONE_RESET_ALL.
230 	 */
231 	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity)
232 		return blkdev_zone_reset_all(bdev);
233 
234 	while (sector < end_sector) {
235 		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
236 		bio->bi_iter.bi_sector = sector;
237 		sector += zone_sectors;
238 
239 		/* This may take a while, so be nice to others */
240 		cond_resched();
241 	}
242 
243 	ret = submit_bio_wait(bio);
244 	bio_put(bio);
245 
246 	return ret;
247 }
248 EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
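
/*
 * Illustrative example (sketch, hypothetical variables): resetting the
 * single zone containing @sector. Zone sizes are a power-of-two number
 * of sectors (enforced by zone revalidation), so the zone start can be
 * obtained by masking.
 *
 *	sector_t zone_sectors = bdev_zone_sectors(bdev);
 *	sector_t zone_start = sector & ~(zone_sectors - 1);
 *	int ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
 *				   zone_start, zone_sectors);
 */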
249 
250 struct zone_report_args {
251 	struct blk_zone __user *zones;
252 };
253 
254 static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
255 				    void *data)
256 {
257 	struct zone_report_args *args = data;
258 
259 	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
260 		return -EFAULT;
261 	return 0;
262 }
263 
264 /*
265  * BLKREPORTZONE ioctl processing.
266  * Called from blkdev_ioctl.
267  */
268 int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
269 		unsigned long arg)
270 {
271 	void __user *argp = (void __user *)arg;
272 	struct zone_report_args args;
273 	struct blk_zone_report rep;
274 	int ret;
275 
276 	if (!argp)
277 		return -EINVAL;
278 
279 	if (!bdev_is_zoned(bdev))
280 		return -ENOTTY;
281 
282 	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
283 		return -EFAULT;
284 
285 	if (!rep.nr_zones)
286 		return -EINVAL;
287 
288 	args.zones = argp + sizeof(struct blk_zone_report);
289 	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
290 				  blkdev_copy_zone_to_user, &args);
291 	if (ret < 0)
292 		return ret;
293 
294 	rep.nr_zones = ret;
295 	rep.flags = BLK_ZONE_REP_CAPACITY;
296 	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
297 		return -EFAULT;
298 	return 0;
299 }
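
/*
 * Illustrative userspace counterpart (sketch, hypothetical program; fd
 * is an open file descriptor for the zoned device): the BLKREPORTZONE
 * argument is a struct blk_zone_report immediately followed by the
 * array of struct blk_zone entries to fill.
 *
 *	struct blk_zone_report *rep =
 *		calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (!ioctl(fd, BLKREPORTZONE, rep))
 *		printf("%u zones reported\n", rep->nr_zones);
 */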
300 
301 static int blkdev_truncate_zone_range(struct block_device *bdev,
302 		blk_mode_t mode, const struct blk_zone_range *zrange)
303 {
304 	loff_t start, end;
305 
306 	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
307 	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
308 		/* Out of range */
309 		return -EINVAL;
310 
311 	start = zrange->sector << SECTOR_SHIFT;
312 	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
313 
314 	return truncate_bdev_range(bdev, mode, start, end);
315 }
316 
317 /*
318  * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
319  * Called from blkdev_ioctl.
320  */
321 int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
322 			   unsigned int cmd, unsigned long arg)
323 {
324 	void __user *argp = (void __user *)arg;
325 	struct blk_zone_range zrange;
326 	enum req_op op;
327 	int ret;
328 
329 	if (!argp)
330 		return -EINVAL;
331 
332 	if (!bdev_is_zoned(bdev))
333 		return -ENOTTY;
334 
335 	if (!(mode & BLK_OPEN_WRITE))
336 		return -EBADF;
337 
338 	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
339 		return -EFAULT;
340 
341 	switch (cmd) {
342 	case BLKRESETZONE:
343 		op = REQ_OP_ZONE_RESET;
344 
345 		/* Invalidate the page cache, including dirty pages. */
346 		inode_lock(bdev->bd_mapping->host);
347 		filemap_invalidate_lock(bdev->bd_mapping);
348 		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
349 		if (ret)
350 			goto fail;
351 		break;
352 	case BLKOPENZONE:
353 		op = REQ_OP_ZONE_OPEN;
354 		break;
355 	case BLKCLOSEZONE:
356 		op = REQ_OP_ZONE_CLOSE;
357 		break;
358 	case BLKFINISHZONE:
359 		op = REQ_OP_ZONE_FINISH;
360 		break;
361 	default:
362 		return -ENOTTY;
363 	}
364 
365 	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
366 
367 fail:
368 	if (cmd == BLKRESETZONE) {
369 		filemap_invalidate_unlock(bdev->bd_mapping);
370 		inode_unlock(bdev->bd_mapping->host);
371 	}
372 
373 	return ret;
374 }
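
/*
 * Illustrative userspace counterpart (sketch; zone_start and
 * zone_sectors are hypothetical): resetting one zone with BLKRESETZONE.
 * The device must be open with write access and the range must be zone
 * aligned.
 *
 *	struct blk_zone_range range = {
 *		.sector = zone_start,
 *		.nr_sectors = zone_sectors,
 *	};
 *	ioctl(fd, BLKRESETZONE, &range);
 */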
375 
376 static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
377 {
378 	return zone->start + zone->len >= get_capacity(disk);
379 }
380 
381 static bool disk_zone_is_full(struct gendisk *disk,
382 			      unsigned int zno, unsigned int offset_in_zone)
383 {
384 	if (zno < disk->nr_zones - 1)
385 		return offset_in_zone >= disk->zone_capacity;
386 	return offset_in_zone >= disk->last_zone_capacity;
387 }
388 
389 static bool disk_zone_wplug_is_full(struct gendisk *disk,
390 				    struct blk_zone_wplug *zwplug)
391 {
392 	return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
393 }
394 
395 static bool disk_insert_zone_wplug(struct gendisk *disk,
396 				   struct blk_zone_wplug *zwplug)
397 {
398 	struct blk_zone_wplug *zwplg;
399 	unsigned long flags;
400 	unsigned int idx =
401 		hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);
402 
403 	/*
404 	 * Add the new zone write plug to the hash table, but carefully as we
 405 	 * are racing with other submission contexts, so we may already have a
406 	 * zone write plug for the same zone.
407 	 */
408 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
409 	hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
410 		if (zwplg->zone_no == zwplug->zone_no) {
411 			spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
412 			return false;
413 		}
414 	}
415 	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
416 	atomic_inc(&disk->nr_zone_wplugs);
417 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
418 
419 	return true;
420 }
421 
422 static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
423 							 sector_t sector)
424 {
425 	unsigned int zno = disk_zone_no(disk, sector);
426 	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
427 	struct blk_zone_wplug *zwplug;
428 
429 	rcu_read_lock();
430 
431 	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
432 		if (zwplug->zone_no == zno &&
433 		    refcount_inc_not_zero(&zwplug->ref)) {
434 			rcu_read_unlock();
435 			return zwplug;
436 		}
437 	}
438 
439 	rcu_read_unlock();
440 
441 	return NULL;
442 }
443 
444 static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
445 							 sector_t sector)
446 {
447 	if (!atomic_read(&disk->nr_zone_wplugs))
448 		return NULL;
449 
450 	return disk_get_hashed_zone_wplug(disk, sector);
451 }
452 
453 static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
454 {
455 	struct blk_zone_wplug *zwplug =
456 		container_of(rcu_head, struct blk_zone_wplug, rcu_head);
457 
458 	mempool_free(zwplug, zwplug->disk->zone_wplugs_pool);
459 }
460 
461 static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
462 {
463 	if (refcount_dec_and_test(&zwplug->ref)) {
464 		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
465 		WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
466 		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
467 
468 		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
469 	}
470 }
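
/*
 * Typical get/put pairing (sketch): a successful disk_get_zone_wplug()
 * must always be balanced by disk_put_zone_wplug() once the caller is
 * done manipulating the plug, usually under the plug spinlock.
 *
 *	zwplug = disk_get_zone_wplug(disk, sector);
 *	if (zwplug) {
 *		spin_lock_irqsave(&zwplug->lock, flags);
 *		... inspect or update the plug state ...
 *		spin_unlock_irqrestore(&zwplug->lock, flags);
 *		disk_put_zone_wplug(zwplug);
 *	}
 */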
471 
472 static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
473 						 struct blk_zone_wplug *zwplug)
474 {
475 	lockdep_assert_held(&zwplug->lock);
476 
477 	/* If the zone write plug was already removed, we are done. */
478 	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
479 		return false;
480 
481 	/* If the zone write plug is still plugged, it cannot be removed. */
482 	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
483 		return false;
484 
485 	/*
486 	 * Completions of BIOs with blk_zone_write_plug_bio_endio() may
487 	 * happen after handling a request completion with
488 	 * blk_zone_write_plug_finish_request() (e.g. with split BIOs
489 	 * that are chained). In such case, disk_zone_wplug_unplug_bio()
490 	 * should not attempt to remove the zone write plug until all BIO
491 	 * completions are seen. Check by looking at the zone write plug
492 	 * reference count, which is 2 when the plug is unused (one reference
493 	 * taken when the plug was allocated and another reference taken by the
494 	 * caller context).
495 	 */
496 	if (refcount_read(&zwplug->ref) > 2)
497 		return false;
498 
499 	/* We can remove zone write plugs for zones that are empty or full. */
500 	return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
501 }
502 
503 static void disk_remove_zone_wplug(struct gendisk *disk,
504 				   struct blk_zone_wplug *zwplug)
505 {
506 	unsigned long flags;
507 
508 	/* If the zone write plug was already removed, we have nothing to do. */
509 	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
510 		return;
511 
512 	/*
513 	 * Mark the zone write plug as unhashed and drop the extra reference we
514 	 * took when the plug was inserted in the hash table.
515 	 */
516 	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
517 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
518 	hlist_del_init_rcu(&zwplug->node);
519 	atomic_dec(&disk->nr_zone_wplugs);
520 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
521 	disk_put_zone_wplug(zwplug);
522 }
523 
524 static void blk_zone_wplug_bio_work(struct work_struct *work);
525 
526 /*
527  * Get a reference on the write plug for the zone containing @sector.
528  * If the plug does not exist, it is allocated and hashed.
529  * Return a pointer to the zone write plug with the plug spinlock held.
530  */
531 static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
532 					sector_t sector, gfp_t gfp_mask,
533 					unsigned long *flags)
534 {
535 	unsigned int zno = disk_zone_no(disk, sector);
536 	struct blk_zone_wplug *zwplug;
537 
538 again:
539 	zwplug = disk_get_zone_wplug(disk, sector);
540 	if (zwplug) {
541 		/*
542 		 * Check that a BIO completion or a zone reset or finish
543 		 * operation has not already removed the zone write plug from
544 		 * the hash table and dropped its reference count. In such case,
545 		 * we need to get a new plug so start over from the beginning.
546 		 */
547 		spin_lock_irqsave(&zwplug->lock, *flags);
548 		if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
549 			spin_unlock_irqrestore(&zwplug->lock, *flags);
550 			disk_put_zone_wplug(zwplug);
551 			goto again;
552 		}
553 		return zwplug;
554 	}
555 
556 	/*
557 	 * Allocate and initialize a zone write plug with an extra reference
558 	 * so that it is not freed when the zone write plug becomes idle without
559 	 * the zone being full.
560 	 */
561 	zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask);
562 	if (!zwplug)
563 		return NULL;
564 
565 	INIT_HLIST_NODE(&zwplug->node);
566 	refcount_set(&zwplug->ref, 2);
567 	spin_lock_init(&zwplug->lock);
568 	zwplug->flags = 0;
569 	zwplug->zone_no = zno;
570 	zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
571 	bio_list_init(&zwplug->bio_list);
572 	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
573 	zwplug->disk = disk;
574 
575 	spin_lock_irqsave(&zwplug->lock, *flags);
576 
577 	/*
578 	 * Insert the new zone write plug in the hash table. This can fail only
579 	 * if another context already inserted a plug. Retry from the beginning
580 	 * in such case.
581 	 */
582 	if (!disk_insert_zone_wplug(disk, zwplug)) {
583 		spin_unlock_irqrestore(&zwplug->lock, *flags);
584 		mempool_free(zwplug, disk->zone_wplugs_pool);
585 		goto again;
586 	}
587 
588 	return zwplug;
589 }
590 
591 static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
592 					       struct bio *bio)
593 {
594 	struct request_queue *q = zwplug->disk->queue;
595 
596 	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
597 	bio_io_error(bio);
598 	disk_put_zone_wplug(zwplug);
 599 	/* Drop the reference taken by disk_zone_wplug_add_bio() */
600 	blk_queue_exit(q);
601 }
602 
603 /*
604  * Abort (fail) all plugged BIOs of a zone write plug.
605  */
606 static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
607 {
608 	struct bio *bio;
609 
610 	if (bio_list_empty(&zwplug->bio_list))
611 		return;
612 
613 	pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
614 			    zwplug->disk->disk_name, zwplug->zone_no);
615 	while ((bio = bio_list_pop(&zwplug->bio_list)))
616 		blk_zone_wplug_bio_io_error(zwplug, bio);
617 }
618 
619 /*
620  * Set a zone write plug write pointer offset to the specified value.
621  * This aborts all plugged BIOs, which is fine as this function is called for
622  * a zone reset operation, a zone finish operation or if the zone needs a wp
623  * update from a report zone after a write error.
624  */
625 static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
626 					  struct blk_zone_wplug *zwplug,
627 					  unsigned int wp_offset)
628 {
629 	lockdep_assert_held(&zwplug->lock);
630 
631 	/* Update the zone write pointer and abort all plugged BIOs. */
632 	zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
633 	zwplug->wp_offset = wp_offset;
634 	disk_zone_wplug_abort(zwplug);
635 
636 	/*
637 	 * The zone write plug now has no BIO plugged: remove it from the
638 	 * hash table so that it cannot be seen. The plug will be freed
639 	 * when the last reference is dropped.
640 	 */
641 	if (disk_should_remove_zone_wplug(disk, zwplug))
642 		disk_remove_zone_wplug(disk, zwplug);
643 }
644 
645 static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
646 {
647 	switch (zone->cond) {
648 	case BLK_ZONE_COND_IMP_OPEN:
649 	case BLK_ZONE_COND_EXP_OPEN:
650 	case BLK_ZONE_COND_CLOSED:
651 		return zone->wp - zone->start;
652 	case BLK_ZONE_COND_FULL:
653 		return zone->len;
654 	case BLK_ZONE_COND_EMPTY:
655 		return 0;
656 	case BLK_ZONE_COND_NOT_WP:
657 	case BLK_ZONE_COND_OFFLINE:
658 	case BLK_ZONE_COND_READONLY:
659 	default:
660 		/*
661 		 * Conventional, offline and read-only zones do not have a valid
662 		 * write pointer.
663 		 */
664 		return UINT_MAX;
665 	}
666 }
667 
668 static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
669 					   struct blk_zone *zone)
670 {
671 	struct blk_zone_wplug *zwplug;
672 	unsigned long flags;
673 
674 	zwplug = disk_get_zone_wplug(disk, zone->start);
675 	if (!zwplug)
676 		return;
677 
678 	spin_lock_irqsave(&zwplug->lock, flags);
679 	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
680 		disk_zone_wplug_set_wp_offset(disk, zwplug,
681 					      blk_zone_wp_offset(zone));
682 	spin_unlock_irqrestore(&zwplug->lock, flags);
683 
684 	disk_put_zone_wplug(zwplug);
685 }
686 
687 static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
688 {
689 	struct disk_report_zones_cb_args args = {
690 		.disk = disk,
691 	};
692 
693 	return disk->fops->report_zones(disk, sector, 1,
694 					disk_report_zones_cb, &args);
695 }
696 
697 static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
698 						  unsigned int wp_offset)
699 {
700 	struct gendisk *disk = bio->bi_bdev->bd_disk;
701 	sector_t sector = bio->bi_iter.bi_sector;
702 	struct blk_zone_wplug *zwplug;
703 	unsigned long flags;
704 
705 	/* Conventional zones cannot be reset nor finished. */
706 	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
707 		bio_io_error(bio);
708 		return true;
709 	}
710 
711 	/*
712 	 * No-wait reset or finish BIOs do not make much sense as the callers
 713 	 * issue these as blocking operations in most cases. To avoid the BIO
 714 	 * execution potentially failing with BLK_STS_AGAIN, warn about
715 	 * REQ_NOWAIT being set and ignore that flag.
716 	 */
717 	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
718 		bio->bi_opf &= ~REQ_NOWAIT;
719 
720 	/*
721 	 * If we have a zone write plug, set its write pointer offset to 0
722 	 * (reset case) or to the zone size (finish case). This will abort all
723 	 * BIOs plugged for the target zone. It is fine as resetting or
724 	 * finishing zones while writes are still in-flight will result in the
725 	 * writes failing anyway.
726 	 */
727 	zwplug = disk_get_zone_wplug(disk, sector);
728 	if (zwplug) {
729 		spin_lock_irqsave(&zwplug->lock, flags);
730 		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
731 		spin_unlock_irqrestore(&zwplug->lock, flags);
732 		disk_put_zone_wplug(zwplug);
733 	}
734 
735 	return false;
736 }
737 
738 static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
739 {
740 	struct gendisk *disk = bio->bi_bdev->bd_disk;
741 	struct blk_zone_wplug *zwplug;
742 	unsigned long flags;
743 	sector_t sector;
744 
745 	/*
746 	 * Set the write pointer offset of all zone write plugs to 0. This will
747 	 * abort all plugged BIOs. It is fine as resetting zones while writes
748 	 * are still in-flight will result in the writes failing anyway.
749 	 */
750 	for (sector = 0; sector < get_capacity(disk);
751 	     sector += disk->queue->limits.chunk_sectors) {
752 		zwplug = disk_get_zone_wplug(disk, sector);
753 		if (zwplug) {
754 			spin_lock_irqsave(&zwplug->lock, flags);
755 			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
756 			spin_unlock_irqrestore(&zwplug->lock, flags);
757 			disk_put_zone_wplug(zwplug);
758 		}
759 	}
760 
761 	return false;
762 }
763 
764 static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
765 					      struct blk_zone_wplug *zwplug)
766 {
767 	/*
768 	 * Take a reference on the zone write plug and schedule the submission
769 	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
770 	 * reference we take here.
771 	 */
772 	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
773 	refcount_inc(&zwplug->ref);
774 	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
775 }
776 
777 static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
778 				struct blk_zone_wplug *zwplug,
779 				struct bio *bio, unsigned int nr_segs)
780 {
781 	bool schedule_bio_work = false;
782 
783 	/*
784 	 * Grab an extra reference on the BIO request queue usage counter.
785 	 * This reference will be reused to submit a request for the BIO for
786 	 * blk-mq devices and dropped when the BIO is failed and after
787 	 * it is issued in the case of BIO-based devices.
788 	 */
789 	percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);
790 
791 	/*
792 	 * The BIO is being plugged and thus will have to wait for the on-going
793 	 * write and for all other writes already plugged. So polling makes
794 	 * no sense.
795 	 */
796 	bio_clear_polled(bio);
797 
798 	/*
799 	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
800 	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
801 	 * work if this is the first BIO we are plugging.
802 	 */
803 	if (bio->bi_opf & REQ_NOWAIT) {
804 		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
805 		bio->bi_opf &= ~REQ_NOWAIT;
806 	}
807 
808 	/*
809 	 * Reuse the poll cookie field to store the number of segments when
810 	 * split to the hardware limits.
811 	 */
812 	bio->__bi_nr_segments = nr_segs;
813 
814 	/*
815 	 * We always receive BIOs after they are split and ready to be issued.
816 	 * The block layer passes the parts of a split BIO in order, and the
 817 	 * user must also issue writes sequentially. So simply add the new BIO
818 	 * at the tail of the list to preserve the sequential write order.
819 	 */
820 	bio_list_add(&zwplug->bio_list, bio);
821 
822 	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
823 
824 	if (schedule_bio_work)
825 		disk_zone_wplug_schedule_bio_work(disk, zwplug);
826 }
827 
828 /*
829  * Called from bio_attempt_back_merge() when a BIO was merged with a request.
830  */
831 void blk_zone_write_plug_bio_merged(struct bio *bio)
832 {
833 	struct blk_zone_wplug *zwplug;
834 	unsigned long flags;
835 
836 	/*
837 	 * If the BIO was already plugged, then we were called through
838 	 * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge().
839 	 * For this case, we already hold a reference on the zone write plug for
840 	 * the BIO and blk_zone_write_plug_init_request() will handle the
841 	 * zone write pointer offset update.
842 	 */
843 	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
844 		return;
845 
846 	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
847 
848 	/*
849 	 * Get a reference on the zone write plug of the target zone and advance
850 	 * the zone write pointer offset. Given that this is a merge, we already
851 	 * have at least one request and one BIO referencing the zone write
852 	 * plug. So this should not fail.
853 	 */
854 	zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk,
855 				     bio->bi_iter.bi_sector);
856 	if (WARN_ON_ONCE(!zwplug))
857 		return;
858 
859 	spin_lock_irqsave(&zwplug->lock, flags);
860 	zwplug->wp_offset += bio_sectors(bio);
861 	spin_unlock_irqrestore(&zwplug->lock, flags);
862 }
863 
864 /*
865  * Attempt to merge plugged BIOs with a newly prepared request for a BIO that
866  * already went through zone write plugging (either a new BIO or one that was
867  * unplugged).
868  */
869 void blk_zone_write_plug_init_request(struct request *req)
870 {
871 	sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
872 	struct request_queue *q = req->q;
873 	struct gendisk *disk = q->disk;
874 	struct blk_zone_wplug *zwplug =
875 		disk_get_zone_wplug(disk, blk_rq_pos(req));
876 	unsigned long flags;
877 	struct bio *bio;
878 
879 	if (WARN_ON_ONCE(!zwplug))
880 		return;
881 
882 	/*
883 	 * Indicate that completion of this request needs to be handled with
884 	 * blk_zone_write_plug_finish_request(), which will drop the reference
885 	 * on the zone write plug we took above on entry to this function.
886 	 */
887 	req->rq_flags |= RQF_ZONE_WRITE_PLUGGING;
888 
889 	if (blk_queue_nomerges(q))
890 		return;
891 
892 	/*
893 	 * Walk through the list of plugged BIOs to check if they can be merged
894 	 * into the back of the request.
895 	 */
896 	spin_lock_irqsave(&zwplug->lock, flags);
897 	while (!disk_zone_wplug_is_full(disk, zwplug)) {
898 		bio = bio_list_peek(&zwplug->bio_list);
899 		if (!bio)
900 			break;
901 
902 		if (bio->bi_iter.bi_sector != req_back_sector ||
903 		    !blk_rq_merge_ok(req, bio))
904 			break;
905 
906 		WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
907 			     !bio->__bi_nr_segments);
908 
909 		bio_list_pop(&zwplug->bio_list);
910 		if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
911 		    BIO_MERGE_OK) {
912 			bio_list_add_head(&zwplug->bio_list, bio);
913 			break;
914 		}
915 
916 		/* Drop the reference taken by disk_zone_wplug_add_bio(). */
917 		blk_queue_exit(q);
918 		zwplug->wp_offset += bio_sectors(bio);
919 
920 		req_back_sector += bio_sectors(bio);
921 	}
922 	spin_unlock_irqrestore(&zwplug->lock, flags);
923 }
924 
925 /*
926  * Check and prepare a BIO for submission by incrementing the write pointer
927  * offset of its zone write plug and changing zone append operations into
928  * regular write when zone append emulation is needed.
929  */
930 static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
931 				       struct bio *bio)
932 {
933 	struct gendisk *disk = bio->bi_bdev->bd_disk;
934 
935 	lockdep_assert_held(&zwplug->lock);
936 
937 	/*
938 	 * If we lost track of the zone write pointer due to a write error,
939 	 * the user must either execute a report zones, reset the zone or finish
 940 	 * the zone to recover a reliable write pointer position. Fail BIOs if the
941 	 * user did not do that as we cannot handle emulated zone append
942 	 * otherwise.
943 	 */
944 	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
945 		return false;
946 
947 	/*
948 	 * Check that the user is not attempting to write to a full zone.
949 	 * We know such BIO will fail, and that would potentially overflow our
950 	 * write pointer offset beyond the end of the zone.
951 	 */
952 	if (disk_zone_wplug_is_full(disk, zwplug))
953 		return false;
954 
955 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
956 		/*
957 		 * Use a regular write starting at the current write pointer.
958 		 * Similarly to native zone append operations, do not allow
959 		 * merging.
960 		 */
961 		bio->bi_opf &= ~REQ_OP_MASK;
962 		bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
963 		bio->bi_iter.bi_sector += zwplug->wp_offset;
964 
965 		/*
966 		 * Remember that this BIO is in fact a zone append operation
967 		 * so that we can restore its operation code on completion.
968 		 */
969 		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
970 	} else {
971 		/*
972 		 * Check for non-sequential writes early as we know that BIOs
 973 		 * with a start sector not aligned to the zone write pointer
974 		 * will fail.
975 		 */
976 		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
977 			return false;
978 	}
979 
980 	/* Advance the zone write pointer offset. */
981 	zwplug->wp_offset += bio_sectors(bio);
982 
983 	return true;
984 }
985 
986 static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
987 {
988 	struct gendisk *disk = bio->bi_bdev->bd_disk;
989 	sector_t sector = bio->bi_iter.bi_sector;
990 	struct blk_zone_wplug *zwplug;
991 	gfp_t gfp_mask = GFP_NOIO;
992 	unsigned long flags;
993 
994 	/*
995 	 * BIOs must be fully contained within a zone so that we use the correct
996 	 * zone write plug for the entire BIO. For blk-mq devices, the block
997 	 * layer should already have done any splitting required to ensure this
998 	 * and this BIO should thus not be straddling zone boundaries. For
999 	 * BIO-based devices, it is the responsibility of the driver to split
1000 	 * the bio before submitting it.
1001 	 */
1002 	if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
1003 		bio_io_error(bio);
1004 		return true;
1005 	}
1006 
1007 	/* Conventional zones do not need write plugging. */
1008 	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
1009 		/* Zone append to conventional zones is not allowed. */
1010 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1011 			bio_io_error(bio);
1012 			return true;
1013 		}
1014 		return false;
1015 	}
1016 
1017 	if (bio->bi_opf & REQ_NOWAIT)
1018 		gfp_mask = GFP_NOWAIT;
1019 
1020 	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
1021 	if (!zwplug) {
1022 		if (bio->bi_opf & REQ_NOWAIT)
1023 			bio_wouldblock_error(bio);
1024 		else
1025 			bio_io_error(bio);
1026 		return true;
1027 	}
1028 
1029 	/* Indicate that this BIO is being handled using zone write plugging. */
1030 	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
1031 
1032 	/*
1033 	 * If the zone is already plugged, add the BIO to the plug BIO list.
1034 	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
1035 	 * BLK_STS_AGAIN failure if we let the BIO execute.
1036 	 * Otherwise, plug and let the BIO execute.
1037 	 */
1038 	if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
1039 	    (bio->bi_opf & REQ_NOWAIT))
1040 		goto plug;
1041 
1042 	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
1043 		spin_unlock_irqrestore(&zwplug->lock, flags);
1044 		bio_io_error(bio);
1045 		return true;
1046 	}
1047 
1048 	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
1049 
1050 	spin_unlock_irqrestore(&zwplug->lock, flags);
1051 
1052 	return false;
1053 
1054 plug:
1055 	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
1056 
1057 	spin_unlock_irqrestore(&zwplug->lock, flags);
1058 
1059 	return true;
1060 }
1061 
1062 static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
1063 {
1064 	struct gendisk *disk = bio->bi_bdev->bd_disk;
1065 	struct blk_zone_wplug *zwplug;
1066 	unsigned long flags;
1067 
1068 	/*
1069 	 * We have native support for zone append operations, so we are not
1070 	 * going to handle @bio through plugging. However, we may already have a
1071 	 * zone write plug for the target zone if that zone was previously
1072 	 * partially written using regular writes. In such case, we risk leaving
1073 	 * the plug in the disk hash table if the zone is fully written using
1074 	 * zone append operations. Avoid this by removing the zone write plug.
1075 	 */
1076 	zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
1077 	if (likely(!zwplug))
1078 		return;
1079 
1080 	spin_lock_irqsave(&zwplug->lock, flags);
1081 
1082 	/*
1083 	 * We are about to remove the zone write plug. But if the user
1084 	 * (mistakenly) has issued regular writes together with native zone
 1085 	 * append, we must abort the writes as otherwise the plugged BIOs would
1086 	 * not be executed by the plug BIO work as disk_get_zone_wplug() will
1087 	 * return NULL after the plug is removed. Aborting the plugged write
1088 	 * BIOs is consistent with the fact that these writes will most likely
 1089 	 * fail anyway as there are no ordering guarantees between zone append
1090 	 * operations and regular write operations.
1091 	 */
1092 	if (!bio_list_empty(&zwplug->bio_list)) {
1093 		pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
1094 				    disk->disk_name, zwplug->zone_no);
1095 		disk_zone_wplug_abort(zwplug);
1096 	}
1097 	disk_remove_zone_wplug(disk, zwplug);
1098 	spin_unlock_irqrestore(&zwplug->lock, flags);
1099 
1100 	disk_put_zone_wplug(zwplug);
1101 }
1102 
1103 /**
1104  * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
1105  * @bio: The BIO being submitted
1106  * @nr_segs: The number of physical segments of @bio
1107  *
1108  * Handle write, write zeroes and zone append operations requiring emulation
1109  * using zone write plugging.
1110  *
1111  * Return true whenever @bio execution needs to be delayed through the zone
1112  * write plug. Otherwise, return false to let the submission path process
1113  * @bio normally.
1114  */
1115 bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
1116 {
1117 	struct block_device *bdev = bio->bi_bdev;
1118 
1119 	if (!bdev->bd_disk->zone_wplugs_hash)
1120 		return false;
1121 
1122 	/*
1123 	 * If the BIO already has the plugging flag set, then it was already
1124 	 * handled through this path and this is a submission from the zone
1125 	 * plug bio submit work.
1126 	 */
1127 	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
1128 		return false;
1129 
1130 	/*
 1131 	 * We do not need to do anything special for empty flush BIOs, e.g.
 1132 	 * BIOs such as those issued by blkdev_issue_flush(). This is because it is
1133 	 * the responsibility of the user to first wait for the completion of
1134 	 * write operations for flush to have any effect on the persistence of
1135 	 * the written data.
1136 	 */
1137 	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
1138 		return false;
1139 
1140 	/*
1141 	 * Regular writes and write zeroes need to be handled through the target
1142 	 * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH
1143 	 * which may need to go through the flush machinery depending on the
1144 	 * target device capabilities. Plugging such writes is fine as the flush
1145 	 * machinery operates at the request level, below the plug, and
1146 	 * completion of the flush sequence will go through the regular BIO
1147 	 * completion, which will handle zone write plugging.
1148 	 * Zone append operations for devices that requested emulation must
1149 	 * also be plugged so that these BIOs can be changed into regular
1150 	 * write BIOs.
1151 	 * Zone reset, reset all and finish commands need special treatment
1152 	 * to correctly track the write pointer offset of zones. These commands
1153 	 * are not plugged as we do not need serialization with write
1154 	 * operations. It is the responsibility of the user to not issue reset
1155 	 * and finish commands when write operations are in flight.
1156 	 */
1157 	switch (bio_op(bio)) {
1158 	case REQ_OP_ZONE_APPEND:
1159 		if (!bdev_emulates_zone_append(bdev)) {
1160 			blk_zone_wplug_handle_native_zone_append(bio);
1161 			return false;
1162 		}
1163 		fallthrough;
1164 	case REQ_OP_WRITE:
1165 	case REQ_OP_WRITE_ZEROES:
1166 		return blk_zone_wplug_handle_write(bio, nr_segs);
1167 	case REQ_OP_ZONE_RESET:
1168 		return blk_zone_wplug_handle_reset_or_finish(bio, 0);
1169 	case REQ_OP_ZONE_FINISH:
1170 		return blk_zone_wplug_handle_reset_or_finish(bio,
1171 						bdev_zone_sectors(bdev));
1172 	case REQ_OP_ZONE_RESET_ALL:
1173 		return blk_zone_wplug_handle_reset_all(bio);
1174 	default:
1175 		return false;
1176 	}
1177 
1178 	return false;
1179 }
1180 EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
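
/*
 * Illustrative call site (sketch, hypothetical driver): a BIO-based
 * driver relying on zone write plugging for write ordering calls
 * blk_zone_plug_bio() from its submission path and returns early when
 * the BIO was plugged. nr_segs is 0 here since no segment-based
 * splitting was done for the BIO.
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		if (blk_zone_plug_bio(bio, 0))
 *			return;
 *		... issue bio ...
 *	}
 */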
1181 
1182 static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
1183 				       struct blk_zone_wplug *zwplug)
1184 {
1185 	unsigned long flags;
1186 
1187 	spin_lock_irqsave(&zwplug->lock, flags);
1188 
1189 	/* Schedule submission of the next plugged BIO if we have one. */
1190 	if (!bio_list_empty(&zwplug->bio_list)) {
1191 		disk_zone_wplug_schedule_bio_work(disk, zwplug);
1192 		spin_unlock_irqrestore(&zwplug->lock, flags);
1193 		return;
1194 	}
1195 
1196 	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
1197 
1198 	/*
 1199 	 * If the zone is full (it was fully written or finished), or empty
1200 	 * (it was reset), remove its zone write plug from the hash table.
1201 	 */
1202 	if (disk_should_remove_zone_wplug(disk, zwplug))
1203 		disk_remove_zone_wplug(disk, zwplug);
1204 
1205 	spin_unlock_irqrestore(&zwplug->lock, flags);
1206 }
1207 
1208 void blk_zone_write_plug_bio_endio(struct bio *bio)
1209 {
1210 	struct gendisk *disk = bio->bi_bdev->bd_disk;
1211 	struct blk_zone_wplug *zwplug =
1212 		disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
1213 	unsigned long flags;
1214 
1215 	if (WARN_ON_ONCE(!zwplug))
1216 		return;
1217 
1218 	/* Make sure we do not see this BIO again by clearing the plug flag. */
1219 	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
1220 
1221 	/*
1222 	 * If this is a regular write emulating a zone append operation,
1223 	 * restore the original operation code.
1224 	 */
1225 	if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
1226 		bio->bi_opf &= ~REQ_OP_MASK;
1227 		bio->bi_opf |= REQ_OP_ZONE_APPEND;
1228 	}
1229 
1230 	/*
1231 	 * If the BIO failed, abort all plugged BIOs and mark the plug as
1232 	 * needing a write pointer update.
1233 	 */
1234 	if (bio->bi_status != BLK_STS_OK) {
1235 		spin_lock_irqsave(&zwplug->lock, flags);
1236 		disk_zone_wplug_abort(zwplug);
1237 		zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
1238 		spin_unlock_irqrestore(&zwplug->lock, flags);
1239 	}
1240 
1241 	/* Drop the reference we took when the BIO was issued. */
1242 	disk_put_zone_wplug(zwplug);
1243 
1244 	/*
1245 	 * For BIO-based devices, blk_zone_write_plug_finish_request()
1246 	 * is not called. So we need to schedule execution of the next
1247 	 * plugged BIO here.
1248 	 */
1249 	if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
1250 		disk_zone_wplug_unplug_bio(disk, zwplug);
1251 
1252 	/* Drop the reference we took when entering this function. */
1253 	disk_put_zone_wplug(zwplug);
1254 }
1255 
1256 void blk_zone_write_plug_finish_request(struct request *req)
1257 {
1258 	struct gendisk *disk = req->q->disk;
1259 	struct blk_zone_wplug *zwplug;
1260 
1261 	zwplug = disk_get_zone_wplug(disk, req->__sector);
1262 	if (WARN_ON_ONCE(!zwplug))
1263 		return;
1264 
1265 	req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING;
1266 
1267 	/*
1268 	 * Drop the reference we took when the request was initialized in
1269 	 * blk_zone_write_plug_init_request().
1270 	 */
1271 	disk_put_zone_wplug(zwplug);
1272 
1273 	disk_zone_wplug_unplug_bio(disk, zwplug);
1274 
1275 	/* Drop the reference we took when entering this function. */
1276 	disk_put_zone_wplug(zwplug);
1277 }
1278 
1279 static void blk_zone_wplug_bio_work(struct work_struct *work)
1280 {
1281 	struct blk_zone_wplug *zwplug =
1282 		container_of(work, struct blk_zone_wplug, bio_work);
1283 	struct block_device *bdev;
1284 	unsigned long flags;
1285 	struct bio *bio;
1286 
1287 	/*
1288 	 * Submit the next plugged BIO. If we do not have any, clear
1289 	 * the plugged flag.
1290 	 */
1291 	spin_lock_irqsave(&zwplug->lock, flags);
1292 
1293 again:
1294 	bio = bio_list_pop(&zwplug->bio_list);
1295 	if (!bio) {
1296 		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
1297 		spin_unlock_irqrestore(&zwplug->lock, flags);
1298 		goto put_zwplug;
1299 	}
1300 
1301 	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
1302 		blk_zone_wplug_bio_io_error(zwplug, bio);
1303 		goto again;
1304 	}
1305 
1306 	spin_unlock_irqrestore(&zwplug->lock, flags);
1307 
1308 	bdev = bio->bi_bdev;
1309 	submit_bio_noacct_nocheck(bio);
1310 
1311 	/*
1312 	 * blk-mq devices will reuse the extra reference on the request queue
1313 	 * usage counter we took when the BIO was plugged, but the submission
1314 	 * path for BIO-based devices will not do that. So drop this extra
1315 	 * reference here.
1316 	 */
1317 	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
1318 		blk_queue_exit(bdev->bd_disk->queue);
1319 
1320 put_zwplug:
1321 	/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
1322 	disk_put_zone_wplug(zwplug);
1323 }
1324 
1325 static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
1326 {
1327 	return 1U << disk->zone_wplugs_hash_bits;
1328 }
1329 
1330 void disk_init_zone_resources(struct gendisk *disk)
1331 {
1332 	spin_lock_init(&disk->zone_wplugs_lock);
1333 }
1334 
1335 /*
1336  * For the size of a disk zone write plug hash table, use the size of the
1337  * zone write plug mempool, which is the maximum of the disk open zones and
1338  * active zones limits. But do not exceed 4KB (512 hlist head entries), that is,
1339  * 9 bits. For a disk that has no limits, mempool size defaults to 128.
1340  */
1341 #define BLK_ZONE_WPLUG_MAX_HASH_BITS		9
1342 #define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE	128
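
/*
 * Worked example: a device advertising max_open_zones = 128 and no
 * active zone limit gets a mempool of 128 plugs and a hash table of
 * min(ilog2(128) + 1, 9) = 8 bits, that is, 256 hlist heads.
 */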
1343 
1344 static int disk_alloc_zone_resources(struct gendisk *disk,
1345 				     unsigned int pool_size)
1346 {
1347 	unsigned int i;
1348 
1349 	atomic_set(&disk->nr_zone_wplugs, 0);
1350 	disk->zone_wplugs_hash_bits =
1351 		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
1352 
1353 	disk->zone_wplugs_hash =
1354 		kcalloc(disk_zone_wplugs_hash_size(disk),
1355 			sizeof(struct hlist_head), GFP_KERNEL);
1356 	if (!disk->zone_wplugs_hash)
1357 		return -ENOMEM;
1358 
1359 	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
1360 		INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]);
1361 
1362 	disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size,
1363 						sizeof(struct blk_zone_wplug));
1364 	if (!disk->zone_wplugs_pool)
1365 		goto free_hash;
1366 
1367 	disk->zone_wplugs_wq =
1368 		alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI,
1369 				pool_size, disk->disk_name);
1370 	if (!disk->zone_wplugs_wq)
1371 		goto destroy_pool;
1372 
1373 	return 0;
1374 
1375 destroy_pool:
1376 	mempool_destroy(disk->zone_wplugs_pool);
1377 	disk->zone_wplugs_pool = NULL;
1378 free_hash:
1379 	kfree(disk->zone_wplugs_hash);
1380 	disk->zone_wplugs_hash = NULL;
1381 	disk->zone_wplugs_hash_bits = 0;
1382 	return -ENOMEM;
1383 }
1384 
1385 static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
1386 {
1387 	struct blk_zone_wplug *zwplug;
1388 	unsigned int i;
1389 
1390 	if (!disk->zone_wplugs_hash)
1391 		return;
1392 
1393 	/* Free all the zone write plugs we have. */
1394 	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
1395 		while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
1396 			zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
1397 					     struct blk_zone_wplug, node);
1398 			refcount_inc(&zwplug->ref);
1399 			disk_remove_zone_wplug(disk, zwplug);
1400 			disk_put_zone_wplug(zwplug);
1401 		}
1402 	}
1403 
1404 	WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
1405 	kfree(disk->zone_wplugs_hash);
1406 	disk->zone_wplugs_hash = NULL;
1407 	disk->zone_wplugs_hash_bits = 0;
1408 }
1409 
1410 static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
1411 					       unsigned long *bitmap)
1412 {
1413 	unsigned int nr_conv_zones = 0;
1414 	unsigned long flags;
1415 
1416 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
1417 	if (bitmap)
1418 		nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
1419 	bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
1420 				     lockdep_is_held(&disk->zone_wplugs_lock));
1421 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
1422 
1423 	kfree_rcu_mightsleep(bitmap);
1424 
1425 	return nr_conv_zones;
1426 }
1427 
1428 void disk_free_zone_resources(struct gendisk *disk)
1429 {
1430 	if (!disk->zone_wplugs_pool)
1431 		return;
1432 
1433 	if (disk->zone_wplugs_wq) {
1434 		destroy_workqueue(disk->zone_wplugs_wq);
1435 		disk->zone_wplugs_wq = NULL;
1436 	}
1437 
1438 	disk_destroy_zone_wplugs_hash_table(disk);
1439 
1440 	/*
1441 	 * Wait for the zone write plugs to be RCU-freed before
 1442 	 * destroying the mempool.
1443 	 */
1444 	rcu_barrier();
1445 
1446 	mempool_destroy(disk->zone_wplugs_pool);
1447 	disk->zone_wplugs_pool = NULL;
1448 
1449 	disk_set_conv_zones_bitmap(disk, NULL);
1450 	disk->zone_capacity = 0;
1451 	disk->last_zone_capacity = 0;
1452 	disk->nr_zones = 0;
1453 }
1454 
1455 static inline bool disk_need_zone_resources(struct gendisk *disk)
1456 {
1457 	/*
1458 	 * All mq zoned devices need zone resources so that the block layer
1459 	 * can automatically handle write BIO plugging. BIO-based device drivers
1460 	 * (e.g. DM devices) are normally responsible for handling zone write
1461 	 * ordering and do not need zone resources, unless the driver requires
1462 	 * zone append emulation.
1463 	 */
1464 	return queue_is_mq(disk->queue) ||
1465 		queue_emulates_zone_append(disk->queue);
1466 }
1467 
1468 static int disk_revalidate_zone_resources(struct gendisk *disk,
1469 					  unsigned int nr_zones)
1470 {
1471 	struct queue_limits *lim = &disk->queue->limits;
1472 	unsigned int pool_size;
1473 
1474 	if (!disk_need_zone_resources(disk))
1475 		return 0;
1476 
1477 	/*
1478 	 * If the device has no limit on the maximum number of open and active
1479 	 * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE.
1480 	 */
1481 	pool_size = max(lim->max_open_zones, lim->max_active_zones);
1482 	if (!pool_size)
1483 		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones);
1484 
1485 	if (!disk->zone_wplugs_hash)
1486 		return disk_alloc_zone_resources(disk, pool_size);
1487 
1488 	return 0;
1489 }
1490 
1491 struct blk_revalidate_zone_args {
1492 	struct gendisk	*disk;
1493 	unsigned long	*conv_zones_bitmap;
1494 	unsigned int	nr_zones;
1495 	unsigned int	zone_capacity;
1496 	unsigned int	last_zone_capacity;
1497 	sector_t	sector;
1498 };
1499 
1500 /*
1501  * Update the disk zone resources information and device queue limits.
1502  * The disk queue is frozen when this is executed.
1503  */
1504 static int disk_update_zone_resources(struct gendisk *disk,
1505 				      struct blk_revalidate_zone_args *args)
1506 {
1507 	struct request_queue *q = disk->queue;
1508 	unsigned int nr_seq_zones, nr_conv_zones;
1509 	unsigned int pool_size;
1510 	struct queue_limits lim;
1511 
1512 	disk->nr_zones = args->nr_zones;
1513 	disk->zone_capacity = args->zone_capacity;
1514 	disk->last_zone_capacity = args->last_zone_capacity;
1515 	nr_conv_zones =
1516 		disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
1517 	if (nr_conv_zones >= disk->nr_zones) {
1518 		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
1519 			disk->disk_name, nr_conv_zones, disk->nr_zones);
1520 		return -ENODEV;
1521 	}
1522 
1523 	lim = queue_limits_start_update(q);
1524 
1525 	/*
 1526 	 * Some devices can advertise zone resource limits that are larger than
1527 	 * the number of sequential zones of the zoned block device, e.g. a
1528 	 * small ZNS namespace. For such case, assume that the zoned device has
1529 	 * no zone resource limits.
1530 	 */
1531 	nr_seq_zones = disk->nr_zones - nr_conv_zones;
1532 	if (lim.max_open_zones >= nr_seq_zones)
1533 		lim.max_open_zones = 0;
1534 	if (lim.max_active_zones >= nr_seq_zones)
1535 		lim.max_active_zones = 0;
1536 
1537 	if (!disk->zone_wplugs_pool)
1538 		goto commit;
1539 
1540 	/*
1541 	 * If the device has no limit on the maximum number of open and active
1542 	 * zones, set its max open zone limit to the mempool size to indicate
1543 	 * to the user that there is a potential performance impact due to
1544 	 * dynamic zone write plug allocation when simultaneously writing to
1545 	 * more zones than the size of the mempool.
1546 	 */
1547 	pool_size = max(lim.max_open_zones, lim.max_active_zones);
1548 	if (!pool_size)
1549 		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);
1550 
1551 	mempool_resize(disk->zone_wplugs_pool, pool_size);
1552 
1553 	if (!lim.max_open_zones && !lim.max_active_zones) {
1554 		if (pool_size < nr_seq_zones)
1555 			lim.max_open_zones = pool_size;
1556 		else
1557 			lim.max_open_zones = 0;
1558 	}
1559 
1560 commit:
1561 	return queue_limits_commit_update_frozen(q, &lim);
1562 }
1563 
1564 static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
1565 				    struct blk_revalidate_zone_args *args)
1566 {
1567 	struct gendisk *disk = args->disk;
1568 
1569 	if (zone->capacity != zone->len) {
1570 		pr_warn("%s: Invalid conventional zone capacity\n",
1571 			disk->disk_name);
1572 		return -ENODEV;
1573 	}
1574 
1575 	if (disk_zone_is_last(disk, zone))
1576 		args->last_zone_capacity = zone->capacity;
1577 
1578 	if (!disk_need_zone_resources(disk))
1579 		return 0;
1580 
1581 	if (!args->conv_zones_bitmap) {
1582 		args->conv_zones_bitmap =
1583 			bitmap_zalloc(args->nr_zones, GFP_NOIO);
1584 		if (!args->conv_zones_bitmap)
1585 			return -ENOMEM;
1586 	}
1587 
1588 	set_bit(idx, args->conv_zones_bitmap);
1589 
1590 	return 0;
1591 }
1592 
1593 static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
1594 				   struct blk_revalidate_zone_args *args)
1595 {
1596 	struct gendisk *disk = args->disk;
1597 	struct blk_zone_wplug *zwplug;
1598 	unsigned int wp_offset;
1599 	unsigned long flags;
1600 
1601 	/*
1602 	 * Remember the capacity of the first sequential zone and check
1603 	 * if it is constant for all zones, ignoring the last zone as it can be
1604 	 * smaller.
1605 	 */
1606 	if (!args->zone_capacity)
1607 		args->zone_capacity = zone->capacity;
1608 	if (disk_zone_is_last(disk, zone)) {
1609 		args->last_zone_capacity = zone->capacity;
1610 	} else if (zone->capacity != args->zone_capacity) {
1611 		pr_warn("%s: Invalid variable zone capacity\n",
1612 			disk->disk_name);
1613 		return -ENODEV;
1614 	}
1615 
1616 	/*
1617 	 * If the device needs zone append emulation, we need to track the
1618 	 * write pointer of all zones that are not empty nor full. So make sure
1619 	 * we have a zone write plug for such zone if the device has a zone
1620 	 * write plug hash table.
1621 	 */
1622 	if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
1623 		return 0;
1624 
1625 	disk_zone_wplug_sync_wp_offset(disk, zone);
1626 
1627 	wp_offset = blk_zone_wp_offset(zone);
1628 	if (!wp_offset || wp_offset >= zone->capacity)
1629 		return 0;
1630 
1631 	zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags);
1632 	if (!zwplug)
1633 		return -ENOMEM;
1634 	spin_unlock_irqrestore(&zwplug->lock, flags);
1635 	disk_put_zone_wplug(zwplug);
1636 
1637 	return 0;
1638 }
1639 
1640 /*
1641  * Helper function to check the validity of zones of a zoned block device.
1642  */
1643 static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
1644 				  void *data)
1645 {
1646 	struct blk_revalidate_zone_args *args = data;
1647 	struct gendisk *disk = args->disk;
1648 	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
1649 	int ret;
1650 
1651 	/* Check for bad zones and holes in the zone report */
1652 	if (zone->start != args->sector) {
1653 		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
1654 			disk->disk_name, args->sector, zone->start);
1655 		return -ENODEV;
1656 	}
1657 
1658 	if (zone->start >= get_capacity(disk) || !zone->len) {
1659 		pr_warn("%s: Invalid zone start %llu, length %llu\n",
1660 			disk->disk_name, zone->start, zone->len);
1661 		return -ENODEV;
1662 	}
1663 
1664 	/*
 1665 	 * All zones must have the same size, with the exception of an eventual
1666 	 * smaller last zone.
1667 	 */
1668 	if (!disk_zone_is_last(disk, zone)) {
1669 		if (zone->len != zone_sectors) {
1670 			pr_warn("%s: Invalid zoned device with non constant zone size\n",
1671 				disk->disk_name);
1672 			return -ENODEV;
1673 		}
1674 	} else if (zone->len > zone_sectors) {
1675 		pr_warn("%s: Invalid zoned device with larger last zone size\n",
1676 			disk->disk_name);
1677 		return -ENODEV;
1678 	}
1679 
1680 	if (!zone->capacity || zone->capacity > zone->len) {
1681 		pr_warn("%s: Invalid zone capacity\n",
1682 			disk->disk_name);
1683 		return -ENODEV;
1684 	}
1685 
1686 	/* Check zone type */
1687 	switch (zone->type) {
1688 	case BLK_ZONE_TYPE_CONVENTIONAL:
1689 		ret = blk_revalidate_conv_zone(zone, idx, args);
1690 		break;
1691 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1692 		ret = blk_revalidate_seq_zone(zone, idx, args);
1693 		break;
1694 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1695 	default:
1696 		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
1697 			disk->disk_name, (int)zone->type, zone->start);
1698 		ret = -ENODEV;
1699 	}
1700 
1701 	if (!ret)
1702 		args->sector += zone->len;
1703 
1704 	return ret;
1705 }
1706 
1707 /**
1708  * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs
1709  * @disk:	Target disk
1710  *
1711  * Helper function for low-level device drivers to check, (re) allocate and
1712  * initialize resources used for managing zoned disks. This function should
1713  * normally be called by blk-mq based drivers when a zoned gendisk is probed
1714  * and when the zone configuration of the gendisk changes (e.g. after a format).
1715  * Before calling this function, the device driver must already have set the
1716  * device zone size (chunk_sector limit) and the max zone append limit.
1717  * BIO based drivers can also use this function as long as the device queue
1718  * can be safely frozen.
1719  */
1720 int blk_revalidate_disk_zones(struct gendisk *disk)
1721 {
1722 	struct request_queue *q = disk->queue;
1723 	sector_t zone_sectors = q->limits.chunk_sectors;
1724 	sector_t capacity = get_capacity(disk);
1725 	struct blk_revalidate_zone_args args = { };
1726 	unsigned int noio_flag;
1727 	int ret = -ENOMEM;
1728 
1729 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
1730 		return -EIO;
1731 
1732 	if (!capacity)
1733 		return -ENODEV;
1734 
1735 	/*
1736 	 * Checks that the device driver indicated a valid zone size and that
1737 	 * the max zone append limit is set.
1738 	 */
1739 	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
1740 		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
1741 			disk->disk_name, zone_sectors);
1742 		return -ENODEV;
1743 	}
1744 
1745 	/*
1746 	 * Ensure that all memory allocations in this context are done as if
1747 	 * GFP_NOIO was specified.
1748 	 */
1749 	args.disk = disk;
1750 	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
1751 	noio_flag = memalloc_noio_save();
1752 	ret = disk_revalidate_zone_resources(disk, args.nr_zones);
1753 	if (ret) {
1754 		memalloc_noio_restore(noio_flag);
1755 		return ret;
1756 	}
1757 
1758 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
1759 				       blk_revalidate_zone_cb, &args);
1760 	if (!ret) {
1761 		pr_warn("%s: No zones reported\n", disk->disk_name);
1762 		ret = -ENODEV;
1763 	}
1764 	memalloc_noio_restore(noio_flag);
1765 
1766 	/*
 1767 	 * If zones were reported, make sure that the entire disk capacity
1768 	 * has been checked.
1769 	 */
1770 	if (ret > 0 && args.sector != capacity) {
1771 		pr_warn("%s: Missing zones from sector %llu\n",
1772 			disk->disk_name, args.sector);
1773 		ret = -ENODEV;
1774 	}
1775 
1776 	/*
1777 	 * Set the new disk zone parameters only once the queue is frozen and
1778 	 * all I/Os are completed.
1779 	 */
1780 	if (ret > 0)
1781 		ret = disk_update_zone_resources(disk, &args);
1782 	else
1783 		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
1784 	if (ret) {
1785 		unsigned int memflags = blk_mq_freeze_queue(q);
1786 
1787 		disk_free_zone_resources(disk);
1788 		blk_mq_unfreeze_queue(q, memflags);
1789 	}
1790 
1791 	return ret;
1792 }
1793 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
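
/*
 * Illustrative driver-side sequence (sketch): a blk-mq driver sets the
 * zone size and zone append limit before revalidating. The my_* values
 * are assumptions of this sketch, as is the exact name of the zone
 * append limit field.
 *
 *	lim = queue_limits_start_update(disk->queue);
 *	lim.chunk_sectors = my_zone_sectors;
 *	lim.max_hw_zone_append_sectors = my_zone_append_limit;
 *	ret = queue_limits_commit_update(disk->queue, &lim);
 *	if (!ret)
 *		ret = blk_revalidate_disk_zones(disk);
 */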
1794 
1795 /**
1796  * blk_zone_issue_zeroout - zero-fill a block range in a zone
1797  * @bdev:	blockdev to write
1798  * @sector:	start sector
1799  * @nr_sects:	number of sectors to write
1800  * @gfp_mask:	memory allocation flags (for bio_alloc)
1801  *
1802  * Description:
1803  *  Zero-fill a block range in a zone (@sector must be equal to the zone write
1804  *  pointer), handling potential errors due to the (initially unknown) lack of
1805  *  hardware offload (See blkdev_issue_zeroout()).
1806  */
1807 int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
1808 			   sector_t nr_sects, gfp_t gfp_mask)
1809 {
1810 	int ret;
1811 
1812 	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
1813 		return -EIO;
1814 
1815 	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
1816 				   BLKDEV_ZERO_NOFALLBACK);
1817 	if (ret != -EOPNOTSUPP)
1818 		return ret;
1819 
1820 	/*
1821 	 * The failed call to blkdev_issue_zeroout() advanced the zone write
1822 	 * pointer. Undo this using a report zone to update the zone write
1823 	 * pointer to the correct current value.
1824 	 */
1825 	ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
1826 	if (ret != 1)
1827 		return ret < 0 ? ret : -EIO;
1828 
1829 	/*
1830 	 * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
1831 	 * regular write with zero-pages.
1832 	 */
1833 	return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
1834 }
1835 EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
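
/*
 * Illustrative use (sketch, hypothetical variables): zero-filling
 * nr_sects sectors starting at the current write pointer of a zone,
 * e.g. to pad a partial write. wp_sector must be the zone's current
 * write pointer position.
 *
 *	ret = blk_zone_issue_zeroout(bdev, wp_sector, nr_sects, GFP_KERNEL);
 */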
1836 
1837 #ifdef CONFIG_BLK_DEBUG_FS
1838 static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug,
1839 				  struct seq_file *m)
1840 {
1841 	unsigned int zwp_wp_offset, zwp_flags;
1842 	unsigned int zwp_zone_no, zwp_ref;
1843 	unsigned int zwp_bio_list_size;
1844 	unsigned long flags;
1845 
1846 	spin_lock_irqsave(&zwplug->lock, flags);
1847 	zwp_zone_no = zwplug->zone_no;
1848 	zwp_flags = zwplug->flags;
1849 	zwp_ref = refcount_read(&zwplug->ref);
1850 	zwp_wp_offset = zwplug->wp_offset;
1851 	zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
1852 	spin_unlock_irqrestore(&zwplug->lock, flags);
1853 
1854 	seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref,
1855 		   zwp_wp_offset, zwp_bio_list_size);
1856 }
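
/*
 * Example output line (values illustrative): "527 0x1 3 1024 2" means
 * zone 527, flags 0x1 (BLK_ZONE_WPLUG_PLUGGED), 3 references, a write
 * pointer offset of 1024 sectors and 2 plugged BIOs.
 */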
1857 
1858 int queue_zone_wplugs_show(void *data, struct seq_file *m)
1859 {
1860 	struct request_queue *q = data;
1861 	struct gendisk *disk = q->disk;
1862 	struct blk_zone_wplug *zwplug;
1863 	unsigned int i;
1864 
1865 	if (!disk->zone_wplugs_hash)
1866 		return 0;
1867 
1868 	rcu_read_lock();
1869 	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
1870 		hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i],
1871 					 node)
1872 			queue_zone_wplug_show(zwplug, m);
1873 	rcu_read_unlock();
1874 
1875 	return 0;
1876 }
1877 
1878 #endif
1879