// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/mempool.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-mq-debugfs.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/*
 * Per-zone write plug.
 * @node: hlist_node structure for managing the plug using a hash table.
 * @ref: Zone write plug reference counter. A zone write plug reference is
 *       always at least 1 when the plug is hashed in the disk plug hash table.
 *       The reference is incremented whenever a new BIO needing plugging is
 *       submitted and when a function needs to manipulate a plug. The
 *       reference count is decremented whenever a plugged BIO completes and
 *       when a function that referenced the plug returns. The initial
 *       reference is dropped whenever the zone of the zone write plug is reset
 *       or finished, and when the zone becomes full (last write BIO to the
 *       zone completes).
 * @lock: Spinlock to atomically manipulate the plug.
 * @flags: Flags indicating the plug state.
 * @zone_no: The number of the zone the plug is managing.
 * @wp_offset: The zone write pointer location relative to the start of the zone
 *             as a number of 512B sectors.
 * @bio_list: The list of BIOs that are currently plugged.
 * @bio_work: Work struct to handle issuing of plugged BIOs.
 * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
 * @disk: The gendisk the plug belongs to.
 */
struct blk_zone_wplug {
	struct hlist_node	node;
	refcount_t		ref;
	spinlock_t		lock;
	unsigned int		flags;
	unsigned int		zone_no;
	unsigned int		wp_offset;
	struct bio_list		bio_list;
	struct work_struct	bio_work;
	struct rcu_head		rcu_head;
	struct gendisk		*disk;
};

/*
 * Zone write plug flags bits:
 * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
 *   that is, that write BIOs are being throttled due to a write BIO already
 *   being executed or the zone write plug bio list is not empty.
 * - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
 *   write pointer offset and need to update it.
 * - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
 *   from the disk hash table and that the initial reference to the zone
 *   write plug set when the plug was first added to the hash table has been
 *   dropped. This flag is set when a zone is reset, finished, or becomes
 *   full, to prevent new references to the zone write plug from being taken
 *   for newly incoming BIOs. A zone write plug flagged with this flag will be
 *   freed once all remaining references from BIOs or functions are dropped.
 */
#define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
#define BLK_ZONE_WPLUG_NEED_WP_UPDATE	(1U << 1)
#define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert BLK_ZONE_COND_XXX
 * into its string format. Useful for debugging and tracing zone conditions.
 * For an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
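
/*
 * Example (an illustrative sketch, not part of the original file): a report
 * zones callback could use blk_zone_cond_str() to log the condition of each
 * reported zone:
 *
 *	pr_info("zone %u: cond %s\n", idx, blk_zone_cond_str(zone->cond));
 */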

struct disk_report_zones_cb_args {
	struct gendisk	*disk;
	report_zones_cb	user_cb;
	void		*user_data;
};

static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
					   struct blk_zone *zone);

static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
				void *data)
{
	struct disk_report_zones_cb_args *args = data;
	struct gendisk *disk = args->disk;

	if (disk->zone_wplugs_hash)
		disk_zone_wplug_sync_wp_offset(disk, zone);

	if (!args->user_cb)
		return 0;

	return args->user_cb(zone, idx, args->user_data);
}

/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);
	struct disk_report_zones_cb_args args = {
		.disk = disk,
		.user_cb = cb,
		.user_data = data,
	};

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones,
					disk_report_zones_cb, &args);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
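
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * count the sequential write required zones of a device with a report zones
 * callback. The callback and counter names below are hypothetical.
 *
 *	static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				      void *data)
 *	{
 *		unsigned int *nr_seq_zones = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq_zones)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq_zones = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_zones_cb, &nr_seq_zones);
 */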

static int blkdev_zone_reset_all(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	trace_blkdev_zone_mgmt(&bio, 0);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;

	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones, use
	 * REQ_OP_ZONE_RESET_ALL.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity)
		return blkdev_zone_reset_all(bdev);

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	trace_blkdev_zone_mgmt(bio, nr_sectors);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
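
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * resetting a single zone given a hypothetical zone number zno.
 *
 *	sector_t zone_sectors = bdev_zone_sectors(bdev);
 *	int ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
 *				   zno * zone_sectors, zone_sectors);
 */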

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
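
/*
 * Example (an illustrative userspace sketch, not part of the original file):
 * BLKREPORTZONE expects a struct blk_zone_report immediately followed by an
 * array of rep->nr_zones struct blk_zone entries.
 *
 *	struct blk_zone_report *rep =
 *		malloc(sizeof(*rep) + nr * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = nr;
 *	ret = ioctl(fd, BLKREPORTZONE, rep);
 */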

static int blkdev_truncate_zone_range(struct block_device *bdev,
		blk_mode_t mode, const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		inode_lock(bdev->bd_mapping->host);
		filemap_invalidate_lock(bdev->bd_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);

fail:
	if (cmd == BLKRESETZONE) {
		filemap_invalidate_unlock(bdev->bd_mapping);
		inode_unlock(bdev->bd_mapping->host);
	}

	return ret;
}

static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
{
	return zone->start + zone->len >= get_capacity(disk);
}

static bool disk_zone_is_full(struct gendisk *disk,
			      unsigned int zno, unsigned int offset_in_zone)
{
	if (zno < disk->nr_zones - 1)
		return offset_in_zone >= disk->zone_capacity;
	return offset_in_zone >= disk->last_zone_capacity;
}

static bool disk_zone_wplug_is_full(struct gendisk *disk,
				    struct blk_zone_wplug *zwplug)
{
	return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
}

static bool disk_insert_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	struct blk_zone_wplug *zwplg;
	unsigned long flags;
	unsigned int idx =
		hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);

	/*
	 * Add the new zone write plug to the hash table, but carefully as we
	 * are racing with other submission contexts, so we may already have a
	 * zone write plug for the same zone.
	 */
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
		if (zwplg->zone_no == zwplug->zone_no) {
			spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
			return false;
		}
	}
	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
	atomic_inc(&disk->nr_zone_wplugs);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

	return true;
}

static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
							 sector_t sector)
{
	unsigned int zno = disk_zone_no(disk, sector);
	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
	struct blk_zone_wplug *zwplug;

	rcu_read_lock();

	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
		if (zwplug->zone_no == zno &&
		    refcount_inc_not_zero(&zwplug->ref)) {
			rcu_read_unlock();
			return zwplug;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
							 sector_t sector)
{
	if (!atomic_read(&disk->nr_zone_wplugs))
		return NULL;

	return disk_get_hashed_zone_wplug(disk, sector);
}

static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
	struct blk_zone_wplug *zwplug =
		container_of(rcu_head, struct blk_zone_wplug, rcu_head);

	mempool_free(zwplug, zwplug->disk->zone_wplugs_pool);
}

static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
{
	if (refcount_dec_and_test(&zwplug->ref)) {
		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
		WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));

		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
	}
}

static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
						 struct blk_zone_wplug *zwplug)
{
	lockdep_assert_held(&zwplug->lock);

	/* If the zone write plug was already removed, we are done. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return false;

	/* If the zone write plug is still plugged, it cannot be removed. */
	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
		return false;

	/*
	 * Completions of BIOs with blk_zone_write_plug_bio_endio() may
	 * happen after handling a request completion with
	 * blk_zone_write_plug_finish_request() (e.g. with split BIOs
	 * that are chained). In such case, disk_zone_wplug_unplug_bio()
	 * should not attempt to remove the zone write plug until all BIO
	 * completions are seen. Check by looking at the zone write plug
	 * reference count, which is 2 when the plug is unused (one reference
	 * taken when the plug was allocated and another reference taken by the
	 * caller context).
	 */
	if (refcount_read(&zwplug->ref) > 2)
		return false;

	/* We can remove zone write plugs for zones that are empty or full. */
	return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}

static void disk_remove_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	/* If the zone write plug was already removed, we have nothing to do. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return;

	/*
	 * Mark the zone write plug as unhashed and drop the extra reference we
	 * took when the plug was inserted in the hash table.
	 */
	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_del_init_rcu(&zwplug->node);
	atomic_dec(&disk->nr_zone_wplugs);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
	disk_put_zone_wplug(zwplug);
}

static void blk_zone_wplug_bio_work(struct work_struct *work);

/*
 * Get a reference on the write plug for the zone containing @sector.
 * If the plug does not exist, it is allocated and hashed.
 * Return a pointer to the zone write plug with the plug spinlock held.
 */
static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
					sector_t sector, gfp_t gfp_mask,
					unsigned long *flags)
{
	unsigned int zno = disk_zone_no(disk, sector);
	struct blk_zone_wplug *zwplug;

again:
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		/*
		 * Check that a BIO completion or a zone reset or finish
		 * operation has not already removed the zone write plug from
		 * the hash table and dropped its reference count. In such case,
		 * we need to get a new plug so start over from the beginning.
		 */
		spin_lock_irqsave(&zwplug->lock, *flags);
		if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
			spin_unlock_irqrestore(&zwplug->lock, *flags);
			disk_put_zone_wplug(zwplug);
			goto again;
		}
		return zwplug;
	}

	/*
	 * Allocate and initialize a zone write plug with an extra reference
	 * so that it is not freed when the zone write plug becomes idle without
	 * the zone being full.
	 */
	zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask);
	if (!zwplug)
		return NULL;

	INIT_HLIST_NODE(&zwplug->node);
	refcount_set(&zwplug->ref, 2);
	spin_lock_init(&zwplug->lock);
	zwplug->flags = 0;
	zwplug->zone_no = zno;
	zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
	bio_list_init(&zwplug->bio_list);
	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
	zwplug->disk = disk;

	spin_lock_irqsave(&zwplug->lock, *flags);

	/*
	 * Insert the new zone write plug in the hash table. This can fail only
	 * if another context already inserted a plug. Retry from the beginning
	 * in such case.
	 */
	if (!disk_insert_zone_wplug(disk, zwplug)) {
		spin_unlock_irqrestore(&zwplug->lock, *flags);
		mempool_free(zwplug, disk->zone_wplugs_pool);
		goto again;
	}

	return zwplug;
}

static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
					       struct bio *bio)
{
	struct request_queue *q = zwplug->disk->queue;

	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
	bio_io_error(bio);
	disk_put_zone_wplug(zwplug);
	/* Drop the reference taken by disk_zone_wplug_add_bio(). */
	blk_queue_exit(q);
}

/*
 * Abort (fail) all plugged BIOs of a zone write plug.
 */
static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
	struct bio *bio;

	if (bio_list_empty(&zwplug->bio_list))
		return;

	pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
			    zwplug->disk->disk_name, zwplug->zone_no);
	while ((bio = bio_list_pop(&zwplug->bio_list)))
		blk_zone_wplug_bio_io_error(zwplug, bio);
}

/*
 * Set a zone write plug write pointer offset to the specified value.
 * This aborts all plugged BIOs, which is fine as this function is called for
 * a zone reset operation, a zone finish operation or if the zone needs a wp
 * update from a report zone after a write error.
 */
static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
					  struct blk_zone_wplug *zwplug,
					  unsigned int wp_offset)
{
	lockdep_assert_held(&zwplug->lock);

	/* Update the zone write pointer and abort all plugged BIOs. */
	zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
	zwplug->wp_offset = wp_offset;
	disk_zone_wplug_abort(zwplug);

	/*
	 * The zone write plug now has no BIO plugged: remove it from the
	 * hash table so that it cannot be seen. The plug will be freed
	 * when the last reference is dropped.
	 */
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);
}

static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
		return 0;
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
		/*
		 * Conventional, offline and read-only zones do not have a valid
		 * write pointer.
		 */
		return UINT_MAX;
	}
}

static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
					   struct blk_zone *zone)
{
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	zwplug = disk_get_zone_wplug(disk, zone->start);
	if (!zwplug)
		return;

	spin_lock_irqsave(&zwplug->lock, flags);
	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
		disk_zone_wplug_set_wp_offset(disk, zwplug,
					      blk_zone_wp_offset(zone));
	spin_unlock_irqrestore(&zwplug->lock, flags);

	disk_put_zone_wplug(zwplug);
}

static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
{
	struct disk_report_zones_cb_args args = {
		.disk = disk,
	};

	return disk->fops->report_zones(disk, sector, 1,
					disk_report_zones_cb, &args);
}

static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
						  unsigned int wp_offset)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/* Conventional zones cannot be reset nor finished. */
	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
		bio_io_error(bio);
		return true;
	}

	/*
	 * No-wait reset or finish BIOs do not make much sense as the callers
	 * issue these as blocking operations in most cases. To avoid the BIO
	 * execution potentially failing with BLK_STS_AGAIN, warn about
	 * REQ_NOWAIT being set and ignore that flag.
	 */
	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
		bio->bi_opf &= ~REQ_NOWAIT;

	/*
	 * If we have a zone write plug, set its write pointer offset to 0
	 * (reset case) or to the zone size (finish case). This will abort all
	 * BIOs plugged for the target zone. It is fine as resetting or
	 * finishing zones while writes are still in-flight will result in the
	 * writes failing anyway.
	 */
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		spin_lock_irqsave(&zwplug->lock, flags);
		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
		spin_unlock_irqrestore(&zwplug->lock, flags);
		disk_put_zone_wplug(zwplug);
	}

	return false;
}

static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug;
	unsigned long flags;
	sector_t sector;

	/*
	 * Set the write pointer offset of all zone write plugs to 0. This will
	 * abort all plugged BIOs. It is fine as resetting zones while writes
	 * are still in-flight will result in the writes failing anyway.
	 */
	for (sector = 0; sector < get_capacity(disk);
	     sector += disk->queue->limits.chunk_sectors) {
		zwplug = disk_get_zone_wplug(disk, sector);
		if (zwplug) {
			spin_lock_irqsave(&zwplug->lock, flags);
			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
			spin_unlock_irqrestore(&zwplug->lock, flags);
			disk_put_zone_wplug(zwplug);
		}
	}

	return false;
}

static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
					      struct blk_zone_wplug *zwplug)
{
	/*
	 * Take a reference on the zone write plug and schedule the submission
	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
	 * reference we take here.
	 */
	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
	refcount_inc(&zwplug->ref);
	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}

static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
					   struct blk_zone_wplug *zwplug,
					   struct bio *bio, unsigned int nr_segs)
{
	bool schedule_bio_work = false;

	/*
	 * Grab an extra reference on the BIO request queue usage counter.
	 * This reference will be reused to submit a request for the BIO for
	 * blk-mq devices, and is dropped when the BIO is failed or, in the
	 * case of BIO-based devices, after the BIO is issued.
	 */
	percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);

	/*
	 * The BIO is being plugged and thus will have to wait for the on-going
	 * write and for all other writes already plugged. So polling makes
	 * no sense.
	 */
	bio_clear_polled(bio);

	/*
	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
	 * work if this is the first BIO we are plugging.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
		bio->bi_opf &= ~REQ_NOWAIT;
	}

	/*
	 * Reuse the poll cookie field to store the number of segments when
	 * split to the hardware limits.
	 */
	bio->__bi_nr_segments = nr_segs;

	/*
	 * We always receive BIOs after they are split and ready to be issued.
	 * The block layer passes the parts of a split BIO in order, and the
	 * user must also issue writes sequentially. So simply add the new BIO
	 * at the tail of the list to preserve the sequential write order.
	 */
	bio_list_add(&zwplug->bio_list, bio);
	trace_disk_zone_wplug_add_bio(zwplug->disk->queue, zwplug->zone_no,
				      bio->bi_iter.bi_sector, bio_sectors(bio));

	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

	if (schedule_bio_work)
		disk_zone_wplug_schedule_bio_work(disk, zwplug);
}

/*
 * Called from bio_attempt_back_merge() when a BIO was merged with a request.
 */
void blk_zone_write_plug_bio_merged(struct bio *bio)
{
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/*
	 * If the BIO was already plugged, then we were called through
	 * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge().
	 * For this case, we already hold a reference on the zone write plug for
	 * the BIO and blk_zone_write_plug_init_request() will handle the
	 * zone write pointer offset update.
	 */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return;

	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * Get a reference on the zone write plug of the target zone and advance
	 * the zone write pointer offset. Given that this is a merge, we already
	 * have at least one request and one BIO referencing the zone write
	 * plug. So this should not fail.
	 */
	zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk,
				     bio->bi_iter.bi_sector);
	if (WARN_ON_ONCE(!zwplug))
		return;

	spin_lock_irqsave(&zwplug->lock, flags);
	zwplug->wp_offset += bio_sectors(bio);
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Attempt to merge plugged BIOs with a newly prepared request for a BIO that
 * already went through zone write plugging (either a new BIO or one that was
 * unplugged).
 */
void blk_zone_write_plug_init_request(struct request *req)
{
	sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
	struct request_queue *q = req->q;
	struct gendisk *disk = q->disk;
	struct blk_zone_wplug *zwplug =
		disk_get_zone_wplug(disk, blk_rq_pos(req));
	unsigned long flags;
	struct bio *bio;

	if (WARN_ON_ONCE(!zwplug))
		return;

	/*
	 * Indicate that completion of this request needs to be handled with
	 * blk_zone_write_plug_finish_request(), which will drop the reference
	 * on the zone write plug we took above on entry to this function.
	 */
	req->rq_flags |= RQF_ZONE_WRITE_PLUGGING;

	if (blk_queue_nomerges(q))
		return;

	/*
	 * Walk through the list of plugged BIOs to check if they can be merged
	 * into the back of the request.
	 */
	spin_lock_irqsave(&zwplug->lock, flags);
	while (!disk_zone_wplug_is_full(disk, zwplug)) {
		bio = bio_list_peek(&zwplug->bio_list);
		if (!bio)
			break;

		if (bio->bi_iter.bi_sector != req_back_sector ||
		    !blk_rq_merge_ok(req, bio))
			break;

		WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
			     !bio->__bi_nr_segments);

		bio_list_pop(&zwplug->bio_list);
		if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
		    BIO_MERGE_OK) {
			bio_list_add_head(&zwplug->bio_list, bio);
			break;
		}

		/* Drop the reference taken by disk_zone_wplug_add_bio(). */
		blk_queue_exit(q);
		zwplug->wp_offset += bio_sectors(bio);

		req_back_sector += bio_sectors(bio);
	}
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Check and prepare a BIO for submission by incrementing the write pointer
 * offset of its zone write plug and changing zone append operations into
 * regular writes when zone append emulation is needed.
 */
static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
				       struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	lockdep_assert_held(&zwplug->lock);

	/*
	 * If we lost track of the zone write pointer due to a write error,
	 * the user must either execute a report zones operation, reset the
	 * zone, or finish the zone to recover a reliable write pointer
	 * position. Fail BIOs if the user did not do that as we cannot handle
	 * emulated zone append otherwise.
	 */
	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
		return false;

	/*
	 * Check that the user is not attempting to write to a full zone.
	 * We know such BIO will fail, and that would potentially overflow our
	 * write pointer offset beyond the end of the zone.
	 */
	if (disk_zone_wplug_is_full(disk, zwplug))
		return false;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		/*
		 * Use a regular write starting at the current write pointer.
		 * Similarly to native zone append operations, do not allow
		 * merging.
		 */
		bio->bi_opf &= ~REQ_OP_MASK;
		bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
		bio->bi_iter.bi_sector += zwplug->wp_offset;

		/*
		 * Remember that this BIO is in fact a zone append operation
		 * so that we can restore its operation code on completion.
		 */
		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
	} else {
		/*
		 * Check for non-sequential writes early as we know that BIOs
		 * with a start sector not aligned with the zone write pointer
		 * will fail.
		 */
		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
			return false;
	}

	/* Advance the zone write pointer offset. */
	zwplug->wp_offset += bio_sectors(bio);

	return true;
}

static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;
	gfp_t gfp_mask = GFP_NOIO;
	unsigned long flags;

	/*
	 * BIOs must be fully contained within a zone so that we use the correct
	 * zone write plug for the entire BIO. For blk-mq devices, the block
	 * layer should already have done any splitting required to ensure this
	 * and this BIO should thus not be straddling zone boundaries. For
	 * BIO-based devices, it is the responsibility of the driver to split
	 * the bio before submitting it.
	 */
	if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
		bio_io_error(bio);
		return true;
	}

	/* Conventional zones do not need write plugging. */
	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
		/* Zone append to conventional zones is not allowed. */
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			bio_io_error(bio);
			return true;
		}
		return false;
	}

	if (bio->bi_opf & REQ_NOWAIT)
		gfp_mask = GFP_NOWAIT;

	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
	if (!zwplug) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
		return true;
	}

	/* Indicate that this BIO is being handled using zone write plugging. */
	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * If the zone is already plugged, add the BIO to the plug BIO list.
	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
	 * BLK_STS_AGAIN failure if we let the BIO execute.
	 * Otherwise, plug and let the BIO execute.
	 */
	if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
	    (bio->bi_opf & REQ_NOWAIT))
		goto plug;

	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
		spin_unlock_irqrestore(&zwplug->lock, flags);
		bio_io_error(bio);
		return true;
	}

	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return false;

plug:
	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return true;
}

static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/*
	 * We have native support for zone append operations, so we are not
	 * going to handle @bio through plugging. However, we may already have a
	 * zone write plug for the target zone if that zone was previously
	 * partially written using regular writes. In such case, we risk leaving
	 * the plug in the disk hash table if the zone is fully written using
	 * zone append operations. Avoid this by removing the zone write plug.
	 */
	zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
	if (likely(!zwplug))
		return;

	spin_lock_irqsave(&zwplug->lock, flags);

	/*
	 * We are about to remove the zone write plug. But if the user
	 * (mistakenly) has issued regular writes together with native zone
	 * append, we must abort the writes as otherwise the plugged BIOs would
	 * not be executed by the plug BIO work as disk_get_zone_wplug() will
	 * return NULL after the plug is removed. Aborting the plugged write
	 * BIOs is consistent with the fact that these writes will most likely
	 * fail anyway as there are no ordering guarantees between zone append
	 * operations and regular write operations.
	 */
	if (!bio_list_empty(&zwplug->bio_list)) {
		pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
				    disk->disk_name, zwplug->zone_no);
		disk_zone_wplug_abort(zwplug);
	}
	disk_remove_zone_wplug(disk, zwplug);
	spin_unlock_irqrestore(&zwplug->lock, flags);

	disk_put_zone_wplug(zwplug);
}

/**
 * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
 * @bio: The BIO being submitted
 * @nr_segs: The number of physical segments of @bio
 *
 * Handle write, write zeroes and zone append operations requiring emulation
 * using zone write plugging.
 *
 * Return true whenever @bio execution needs to be delayed through the zone
 * write plug. Otherwise, return false to let the submission path process
 * @bio normally.
 */
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	struct block_device *bdev = bio->bi_bdev;

	if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash))
		return false;

	/*
	 * Regular writes and write zeroes need to be handled through the target
	 * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH
	 * which may need to go through the flush machinery depending on the
	 * target device capabilities. Plugging such writes is fine as the flush
	 * machinery operates at the request level, below the plug, and
	 * completion of the flush sequence will go through the regular BIO
	 * completion, which will handle zone write plugging.
	 * Zone append operations for devices that requested emulation must
	 * also be plugged so that these BIOs can be changed into regular
	 * write BIOs.
	 * Zone reset, reset all and finish commands need special treatment
	 * to correctly track the write pointer offset of zones. These commands
	 * are not plugged as we do not need serialization with write
	 * operations. It is the responsibility of the user to not issue reset
	 * and finish commands when write operations are in flight.
	 */
	switch (bio_op(bio)) {
	case REQ_OP_ZONE_APPEND:
		if (!bdev_emulates_zone_append(bdev)) {
			blk_zone_wplug_handle_native_zone_append(bio);
			return false;
		}
		fallthrough;
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return blk_zone_wplug_handle_write(bio, nr_segs);
	case REQ_OP_ZONE_RESET:
		return blk_zone_wplug_handle_reset_or_finish(bio, 0);
	case REQ_OP_ZONE_FINISH:
		return blk_zone_wplug_handle_reset_or_finish(bio,
						bdev_zone_sectors(bdev));
	case REQ_OP_ZONE_RESET_ALL:
		return blk_zone_wplug_handle_reset_all(bio);
	default:
		return false;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
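
/*
 * Example (an illustrative sketch, not part of the original file): a BIO-based
 * driver typically calls blk_zone_plug_bio() from its ->submit_bio() path
 * before issuing a BIO, and stops processing the BIO when it was plugged:
 *
 *	if (bdev_is_zoned(bio->bi_bdev) && blk_zone_plug_bio(bio, 0))
 *		return;	// the BIO was plugged and will be issued later
 */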
1169
disk_zone_wplug_unplug_bio(struct gendisk * disk,struct blk_zone_wplug * zwplug)1170 static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
1171 struct blk_zone_wplug *zwplug)
1172 {
1173 unsigned long flags;
1174
1175 spin_lock_irqsave(&zwplug->lock, flags);
1176
1177 /* Schedule submission of the next plugged BIO if we have one. */
1178 if (!bio_list_empty(&zwplug->bio_list)) {
1179 disk_zone_wplug_schedule_bio_work(disk, zwplug);
1180 spin_unlock_irqrestore(&zwplug->lock, flags);
1181 return;
1182 }
1183
1184 zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
1185
1186 /*
1187 * If the zone is full (it was fully written or finished, or empty
1188 * (it was reset), remove its zone write plug from the hash table.
1189 */
1190 if (disk_should_remove_zone_wplug(disk, zwplug))
1191 disk_remove_zone_wplug(disk, zwplug);
1192
1193 spin_unlock_irqrestore(&zwplug->lock, flags);
1194 }
1195
blk_zone_append_update_request_bio(struct request * rq,struct bio * bio)1196 void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio)
1197 {
1198 /*
1199 * For zone append requests, the request sector indicates the location
1200 * at which the BIO data was written. Return this value to the BIO
1201 * issuer through the BIO iter sector.
1202 * For plugged zone writes, which include emulated zone append, we need
1203 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
1204 * lookup the zone write plug.
1205 */
1206 bio->bi_iter.bi_sector = rq->__sector;
1207 trace_blk_zone_append_update_request_bio(rq);
1208 }
1209
blk_zone_write_plug_bio_endio(struct bio * bio)1210 void blk_zone_write_plug_bio_endio(struct bio *bio)
1211 {
1212 struct gendisk *disk = bio->bi_bdev->bd_disk;
1213 struct blk_zone_wplug *zwplug =
1214 disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
1215 unsigned long flags;
1216
1217 if (WARN_ON_ONCE(!zwplug))
1218 return;
1219
1220 /* Make sure we do not see this BIO again by clearing the plug flag. */
1221 bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
1222
1223 /*
1224 * If this is a regular write emulating a zone append operation,
1225 * restore the original operation code.
1226 */
1227 if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
1228 bio->bi_opf &= ~REQ_OP_MASK;
1229 bio->bi_opf |= REQ_OP_ZONE_APPEND;
1230 bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND);
1231 }
1232
1233 /*
1234 * If the BIO failed, abort all plugged BIOs and mark the plug as
1235 * needing a write pointer update.
1236 */
1237 if (bio->bi_status != BLK_STS_OK) {
1238 spin_lock_irqsave(&zwplug->lock, flags);
1239 disk_zone_wplug_abort(zwplug);
1240 zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
1241 spin_unlock_irqrestore(&zwplug->lock, flags);
1242 }
1243
1244 /* Drop the reference we took when the BIO was issued. */
1245 disk_put_zone_wplug(zwplug);
1246
1247 /*
1248 * For BIO-based devices, blk_zone_write_plug_finish_request()
1249 * is not called. So we need to schedule execution of the next
1250 * plugged BIO here.
1251 */
1252 if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
1253 disk_zone_wplug_unplug_bio(disk, zwplug);
1254
1255 /* Drop the reference we took when entering this function. */
1256 disk_put_zone_wplug(zwplug);
1257 }
1258
blk_zone_write_plug_finish_request(struct request * req)1259 void blk_zone_write_plug_finish_request(struct request *req)
1260 {
1261 struct gendisk *disk = req->q->disk;
1262 struct blk_zone_wplug *zwplug;
1263
1264 zwplug = disk_get_zone_wplug(disk, req->__sector);
1265 if (WARN_ON_ONCE(!zwplug))
1266 return;
1267
1268 req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING;
1269
1270 /*
1271 * Drop the reference we took when the request was initialized in
1272 * blk_zone_write_plug_init_request().
1273 */
1274 disk_put_zone_wplug(zwplug);
1275
1276 disk_zone_wplug_unplug_bio(disk, zwplug);
1277
1278 /* Drop the reference we took when entering this function. */
1279 disk_put_zone_wplug(zwplug);
1280 }
1281
blk_zone_wplug_bio_work(struct work_struct * work)1282 static void blk_zone_wplug_bio_work(struct work_struct *work)
1283 {
1284 struct blk_zone_wplug *zwplug =
1285 container_of(work, struct blk_zone_wplug, bio_work);
1286 struct block_device *bdev;
1287 unsigned long flags;
1288 struct bio *bio;
1289
1290 /*
1291 * Submit the next plugged BIO. If we do not have any, clear
1292 * the plugged flag.
1293 */
1294 spin_lock_irqsave(&zwplug->lock, flags);
1295
1296 again:
1297 bio = bio_list_pop(&zwplug->bio_list);
1298 if (!bio) {
1299 zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
1300 spin_unlock_irqrestore(&zwplug->lock, flags);
1301 goto put_zwplug;
1302 }
1303
1304 trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no,
1305 bio->bi_iter.bi_sector, bio_sectors(bio));
1306
1307 if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
1308 blk_zone_wplug_bio_io_error(zwplug, bio);
1309 goto again;
1310 }
1311
1312 spin_unlock_irqrestore(&zwplug->lock, flags);
1313
1314 bdev = bio->bi_bdev;
1315
1316 /*
1317 * blk-mq devices will reuse the extra reference on the request queue
1318 * usage counter we took when the BIO was plugged, but the submission
1319 * path for BIO-based devices will not do that. So drop this extra
1320 * reference here.
1321 */
1322 if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) {
1323 bdev->bd_disk->fops->submit_bio(bio);
1324 blk_queue_exit(bdev->bd_disk->queue);
1325 } else {
1326 blk_mq_submit_bio(bio);
1327 }
1328
1329 put_zwplug:
1330 /* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
1331 disk_put_zone_wplug(zwplug);
1332 }
1333
disk_zone_wplugs_hash_size(struct gendisk * disk)1334 static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
1335 {
1336 return 1U << disk->zone_wplugs_hash_bits;
1337 }
1338
disk_init_zone_resources(struct gendisk * disk)1339 void disk_init_zone_resources(struct gendisk *disk)
1340 {
1341 spin_lock_init(&disk->zone_wplugs_lock);
1342 }
1343
1344 /*
1345 * For the size of a disk zone write plug hash table, use the size of the
1346 * zone write plug mempool, which is the maximum of the disk open zones and
1347 * active zones limits. But do not exceed 4KB (512 hlist head entries), that is,
1348 * 9 bits. For a disk that has no limits, mempool size defaults to 128.
1349 */
1350 #define BLK_ZONE_WPLUG_MAX_HASH_BITS 9
1351 #define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE 128
1352
disk_alloc_zone_resources(struct gendisk * disk,unsigned int pool_size)1353 static int disk_alloc_zone_resources(struct gendisk *disk,
1354 unsigned int pool_size)
1355 {
1356 unsigned int i;
1357
1358 atomic_set(&disk->nr_zone_wplugs, 0);
1359 disk->zone_wplugs_hash_bits =
1360 min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
1361
1362 disk->zone_wplugs_hash =
1363 kcalloc(disk_zone_wplugs_hash_size(disk),
1364 sizeof(struct hlist_head), GFP_KERNEL);
1365 if (!disk->zone_wplugs_hash)
1366 return -ENOMEM;
1367
1368 for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
1369 INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]);
1370
1371 disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size,
1372 sizeof(struct blk_zone_wplug));
1373 if (!disk->zone_wplugs_pool)
1374 goto free_hash;
1375
1376 disk->zone_wplugs_wq =
1377 alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI,
1378 pool_size, disk->disk_name);
1379 if (!disk->zone_wplugs_wq)
1380 goto destroy_pool;
1381
1382 return 0;
1383
1384 destroy_pool:
1385 mempool_destroy(disk->zone_wplugs_pool);
1386 disk->zone_wplugs_pool = NULL;
1387 free_hash:
1388 kfree(disk->zone_wplugs_hash);
1389 disk->zone_wplugs_hash = NULL;
1390 disk->zone_wplugs_hash_bits = 0;
1391 return -ENOMEM;
1392 }
1393
disk_destroy_zone_wplugs_hash_table(struct gendisk * disk)1394 static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
1395 {
1396 struct blk_zone_wplug *zwplug;
1397 unsigned int i;
1398
1399 if (!disk->zone_wplugs_hash)
1400 return;
1401
1402 /* Free all the zone write plugs we have. */
1403 for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
1404 while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
1405 zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
1406 struct blk_zone_wplug, node);
1407 refcount_inc(&zwplug->ref);
1408 disk_remove_zone_wplug(disk, zwplug);
1409 disk_put_zone_wplug(zwplug);
1410 }
1411 }
1412
1413 WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
1414 kfree(disk->zone_wplugs_hash);
1415 disk->zone_wplugs_hash = NULL;
1416 disk->zone_wplugs_hash_bits = 0;
1417 }
1418
disk_set_conv_zones_bitmap(struct gendisk * disk,unsigned long * bitmap)1419 static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
1420 unsigned long *bitmap)
1421 {
1422 unsigned int nr_conv_zones = 0;
1423 unsigned long flags;
1424
1425 spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
1426 if (bitmap)
1427 nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
1428 bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
1429 lockdep_is_held(&disk->zone_wplugs_lock));
1430 spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
1431
1432 kfree_rcu_mightsleep(bitmap);
1433
1434 return nr_conv_zones;
1435 }
1436
disk_free_zone_resources(struct gendisk * disk)1437 void disk_free_zone_resources(struct gendisk *disk)
1438 {
1439 if (!disk->zone_wplugs_pool)
1440 return;
1441
1442 if (disk->zone_wplugs_wq) {
1443 destroy_workqueue(disk->zone_wplugs_wq);
1444 disk->zone_wplugs_wq = NULL;
1445 }
1446
1447 disk_destroy_zone_wplugs_hash_table(disk);
1448
1449 /*
1450 * Wait for the zone write plugs to be RCU-freed before
1451 * destorying the mempool.
1452 */
1453 rcu_barrier();
1454
1455 mempool_destroy(disk->zone_wplugs_pool);
1456 disk->zone_wplugs_pool = NULL;
1457
1458 disk_set_conv_zones_bitmap(disk, NULL);
1459 disk->zone_capacity = 0;
1460 disk->last_zone_capacity = 0;
1461 disk->nr_zones = 0;
1462 }
1463
disk_need_zone_resources(struct gendisk * disk)1464 static inline bool disk_need_zone_resources(struct gendisk *disk)
1465 {
1466 /*
1467 * All mq zoned devices need zone resources so that the block layer
1468 * can automatically handle write BIO plugging. BIO-based device drivers
1469 * (e.g. DM devices) are normally responsible for handling zone write
1470 * ordering and do not need zone resources, unless the driver requires
1471 * zone append emulation.
1472 */
1473 return queue_is_mq(disk->queue) ||
1474 queue_emulates_zone_append(disk->queue);
1475 }
1476
disk_revalidate_zone_resources(struct gendisk * disk,unsigned int nr_zones)1477 static int disk_revalidate_zone_resources(struct gendisk *disk,
1478 unsigned int nr_zones)
1479 {
1480 struct queue_limits *lim = &disk->queue->limits;
1481 unsigned int pool_size;
1482
1483 if (!disk_need_zone_resources(disk))
1484 return 0;
1485
1486 /*
1487 * If the device has no limit on the maximum number of open and active
1488 * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE.
1489 */
1490 pool_size = max(lim->max_open_zones, lim->max_active_zones);
1491 if (!pool_size)
1492 pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones);
1493
1494 if (!disk->zone_wplugs_hash)
1495 return disk_alloc_zone_resources(disk, pool_size);
1496
1497 return 0;
1498 }
1499
1500 struct blk_revalidate_zone_args {
1501 struct gendisk *disk;
1502 unsigned long *conv_zones_bitmap;
1503 unsigned int nr_zones;
1504 unsigned int zone_capacity;
1505 unsigned int last_zone_capacity;
1506 sector_t sector;
1507 };
1508
1509 /*
1510 * Update the disk zone resources information and device queue limits.
1511 * The disk queue is frozen when this is executed.
1512 */
disk_update_zone_resources(struct gendisk * disk,struct blk_revalidate_zone_args * args)1513 static int disk_update_zone_resources(struct gendisk *disk,
1514 struct blk_revalidate_zone_args *args)
1515 {
1516 struct request_queue *q = disk->queue;
1517 unsigned int nr_seq_zones, nr_conv_zones;
1518 unsigned int pool_size;
1519 struct queue_limits lim;
1520
1521 disk->nr_zones = args->nr_zones;
1522 disk->zone_capacity = args->zone_capacity;
1523 disk->last_zone_capacity = args->last_zone_capacity;
1524 nr_conv_zones =
1525 disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
1526 if (nr_conv_zones >= disk->nr_zones) {
1527 pr_warn("%s: Invalid number of conventional zones %u / %u\n",
1528 disk->disk_name, nr_conv_zones, disk->nr_zones);
1529 return -ENODEV;
1530 }
1531
1532 lim = queue_limits_start_update(q);
1533
1534 /*
1535 * Some devices can advertize zone resource limits that are larger than
1536 * the number of sequential zones of the zoned block device, e.g. a
1537 * small ZNS namespace. For such case, assume that the zoned device has
1538 * no zone resource limits.
1539 */
1540 nr_seq_zones = disk->nr_zones - nr_conv_zones;
1541 if (lim.max_open_zones >= nr_seq_zones)
1542 lim.max_open_zones = 0;
1543 if (lim.max_active_zones >= nr_seq_zones)
1544 lim.max_active_zones = 0;
1545
1546 if (!disk->zone_wplugs_pool)
1547 goto commit;
1548
1549 /*
1550 * If the device has no limit on the maximum number of open and active
1551 * zones, set its max open zone limit to the mempool size to indicate
1552 * to the user that there is a potential performance impact due to
1553 * dynamic zone write plug allocation when simultaneously writing to
1554 * more zones than the size of the mempool.
1555 */
1556 pool_size = max(lim.max_open_zones, lim.max_active_zones);
1557 if (!pool_size)
1558 pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);
1559
1560 mempool_resize(disk->zone_wplugs_pool, pool_size);
1561
1562 if (!lim.max_open_zones && !lim.max_active_zones) {
1563 if (pool_size < nr_seq_zones)
1564 lim.max_open_zones = pool_size;
1565 else
1566 lim.max_open_zones = 0;
1567 }
1568
1569 commit:
1570 return queue_limits_commit_update_frozen(q, &lim);
1571 }
1572
blk_revalidate_conv_zone(struct blk_zone * zone,unsigned int idx,struct blk_revalidate_zone_args * args)1573 static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
1574 struct blk_revalidate_zone_args *args)
1575 {
1576 struct gendisk *disk = args->disk;
1577
1578 if (zone->capacity != zone->len) {
1579 pr_warn("%s: Invalid conventional zone capacity\n",
1580 disk->disk_name);
1581 return -ENODEV;
1582 }
1583
1584 if (disk_zone_is_last(disk, zone))
1585 args->last_zone_capacity = zone->capacity;
1586
1587 if (!disk_need_zone_resources(disk))
1588 return 0;
1589
1590 if (!args->conv_zones_bitmap) {
1591 args->conv_zones_bitmap =
1592 bitmap_zalloc(args->nr_zones, GFP_NOIO);
1593 if (!args->conv_zones_bitmap)
1594 return -ENOMEM;
1595 }
1596
1597 set_bit(idx, args->conv_zones_bitmap);
1598
1599 return 0;
1600 }
1601
static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
				   struct blk_revalidate_zone_args *args)
{
	struct gendisk *disk = args->disk;
	struct blk_zone_wplug *zwplug;
	unsigned int wp_offset;
	unsigned long flags;

	/*
	 * Remember the capacity of the first sequential zone and check that
	 * it is constant for all zones, ignoring the last zone as it can be
	 * smaller.
	 */
	if (!args->zone_capacity)
		args->zone_capacity = zone->capacity;
	if (disk_zone_is_last(disk, zone)) {
		args->last_zone_capacity = zone->capacity;
	} else if (zone->capacity != args->zone_capacity) {
		pr_warn("%s: Invalid variable zone capacity\n",
			disk->disk_name);
		return -ENODEV;
	}

	/*
	 * If the device needs zone append emulation, we need to track the
	 * write pointer of all zones that are neither empty nor full. So make
	 * sure we have a zone write plug for each such zone if the device has
	 * a zone write plug hash table.
	 */
	if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
		return 0;

	disk_zone_wplug_sync_wp_offset(disk, zone);

	wp_offset = blk_zone_wp_offset(zone);
	if (!wp_offset || wp_offset >= zone->capacity)
		return 0;

	zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags);
	if (!zwplug)
		return -ENOMEM;
	spin_unlock_irqrestore(&zwplug->lock, flags);
	disk_put_zone_wplug(zwplug);

	return 0;
}

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
	int ret;

	/* Check for bad zones and holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	if (zone->start >= get_capacity(disk) || !zone->len) {
		pr_warn("%s: Invalid zone start %llu, length %llu\n",
			disk->disk_name, zone->start, zone->len);
		return -ENODEV;
	}

	/*
	 * All zones must have the same size, with the possible exception of a
	 * smaller last zone.
	 */
	if (!disk_zone_is_last(disk, zone)) {
		if (zone->len != zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else if (zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return -ENODEV;
	}

	if (!zone->capacity || zone->capacity > zone->len) {
		pr_warn("%s: Invalid zone capacity\n",
			disk->disk_name);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		ret = blk_revalidate_conv_zone(zone, idx, args);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
		ret = blk_revalidate_seq_zone(zone, idx, args);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		ret = -ENODEV;
	}

	if (!ret)
		args->sector += zone->len;

	return ret;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs
 * @disk: Target disk
 *
 * Helper function for low-level device drivers to check, (re)allocate and
 * initialize resources used for managing zoned disks. This function should
 * normally be called by blk-mq based drivers when a zoned gendisk is probed
 * and when the zone configuration of the gendisk changes (e.g. after a
 * format). Before calling this function, the device driver must already have
 * set the device zone size (chunk_sectors limit) and the max zone append
 * limit. BIO based drivers can also use this function as long as the device
 * queue can be safely frozen.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = q->limits.chunk_sectors;
	sector_t capacity = get_capacity(disk);
	struct blk_revalidate_zone_args args = { };
	unsigned int noio_flag;
	int ret = -ENOMEM;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;

	if (!capacity)
		return -ENODEV;

	/*
	 * Checks that the device driver indicated a valid zone size and that
	 * the max zone append limit is set.
	 */
	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
			disk->disk_name, zone_sectors);
		return -ENODEV;
	}

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	args.disk = disk;
	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
	noio_flag = memalloc_noio_save();
	ret = disk_revalidate_zone_resources(disk, args.nr_zones);
	if (ret) {
		memalloc_noio_restore(noio_flag);
		return ret;
	}

	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != capacity) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Set the new disk zone parameters only once the queue is frozen and
	 * all I/Os are completed.
	 */
	if (ret > 0)
		ret = disk_update_zone_resources(disk, &args);
	else
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
	if (ret) {
		unsigned int memflags = blk_mq_freeze_queue(q);

		disk_free_zone_resources(disk);
		blk_mq_unfreeze_queue(q, memflags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
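/*
 * Typical call sequence (illustrative sketch, not part of this file): a
 * blk-mq driver sets its zone-related queue limits before allocating the
 * gendisk and then revalidates, e.g.:
 *
 *	lim.features |= BLK_FEAT_ZONED;
 *	lim.chunk_sectors = zone_size_sectors;
 *	... set the zone append limit ...
 *	disk = blk_mq_alloc_disk(&tag_set, &lim, driver_data);
 *	ret = blk_revalidate_disk_zones(disk);
 *
 * The exact limit field names depend on the kernel version.
 */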

/**
 * blk_zone_issue_zeroout - zero-fill a block range in a zone
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill a block range in a zone (@sector must be equal to the zone write
 *  pointer), handling potential errors due to the (initially unknown) lack of
 *  hardware offload (see blkdev_issue_zeroout()).
 */
int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;

	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
		return -EIO;

	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
				   BLKDEV_ZERO_NOFALLBACK);
	if (ret != -EOPNOTSUPP)
		return ret;

	/*
	 * The failed call to blkdev_issue_zeroout() advanced the zone write
	 * pointer. Undo this using a report zone to update the zone write
	 * pointer to the correct current value.
	 */
	ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	/*
	 * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
	 * regular write with zero-pages.
	 */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
}
EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
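/*
 * Illustrative usage (not from this file): a file system zeroing a range
 * that starts at a zone's write pointer might call:
 *
 *	err = blk_zone_issue_zeroout(bdev, wp_sector, nr_sects, GFP_NOFS);
 *
 * where wp_sector and nr_sects are hypothetical caller variables.
 */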

#ifdef CONFIG_BLK_DEBUG_FS
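/*
 * Print one line describing a zone write plug: zone number, flags (in hex),
 * reference count, write pointer offset and number of plugged BIOs. The
 * fields are snapshotted under the plug spinlock so the line is consistent.
 */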
static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug,
				  struct seq_file *m)
{
	unsigned int zwp_wp_offset, zwp_flags;
	unsigned int zwp_zone_no, zwp_ref;
	unsigned int zwp_bio_list_size;
	unsigned long flags;

	spin_lock_irqsave(&zwplug->lock, flags);
	zwp_zone_no = zwplug->zone_no;
	zwp_flags = zwplug->flags;
	zwp_ref = refcount_read(&zwplug->ref);
	zwp_wp_offset = zwplug->wp_offset;
	zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
	spin_unlock_irqrestore(&zwplug->lock, flags);

	seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref,
		   zwp_wp_offset, zwp_bio_list_size);
}

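/*
 * Walk the zone write plug hash table under RCU protection and emit one line
 * per hashed plug. Used by the block layer debugfs code to expose the zone
 * write plugs of a disk.
 */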
int queue_zone_wplugs_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct gendisk *disk = q->disk;
	struct blk_zone_wplug *zwplug;
	unsigned int i;

	if (!disk->zone_wplugs_hash)
		return 0;

	rcu_read_lock();
	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
		hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i],
					 node)
			queue_zone_wplug_show(zwplug, m);
	rcu_read_unlock();

	return 0;
}

#endif