// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "dm-core.h"

#define DM_MSG_PREFIX "zone"

/*
 * For internal zone reports bypassing the top BIO submission path.
 */
static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
				  unsigned int nr_zones,
				  struct dm_report_zones_args *args)
{
	do {
		struct dm_target *tgt;
		int ret;

		tgt = dm_table_find_target(t, args->next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones))
			return -EIO;

		args->tgt = tgt;
		ret = tgt->type->report_zones(tgt, args,
					      nr_zones - args->zone_idx);
		if (ret < 0)
			return ret;
	} while (args->zone_idx < nr_zones &&
		 args->next_sector < get_capacity(md->disk));

	return args->zone_idx;
}

/*
 * User-facing report zones operation for a dm block device. This calls the
 * report_zones operation of each target in the device table. Targets
 * generally implement their report_zones operation using dm_report_zones().
 */
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones,
			struct blk_report_zones_args *args)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	struct dm_table *zone_revalidate_map = READ_ONCE(md->zone_revalidate_map);
	int srcu_idx, ret = -EIO;
	bool put_table = false;

	if (!zone_revalidate_map || md->revalidate_map_task != current) {
		/*
		 * Regular user context, or zone revalidation during __bind()
		 * is in progress but this call comes from a different process.
		 */
		map = dm_get_live_table(md, &srcu_idx);
		put_table = true;

		if (dm_suspended_md(md)) {
			ret = -EAGAIN;
			goto do_put_table;
		}
	} else {
		/* Zone revalidation during __bind() */
		map = zone_revalidate_map;
	}

	if (map) {
		struct dm_report_zones_args dm_args = {
			.disk = md->disk,
			.next_sector = sector,
			.rep_args = args,
		};
		ret = dm_blk_do_report_zones(md, map, nr_zones, &dm_args);
	}

do_put_table:
	if (put_table)
		dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
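	/*
	 * For example (hypothetical numbers): a target with begin == 524288
	 * mapping a device region with args->start == 0 gives
	 * sector_diff == 524288, so a zone reported at device sector 131072
	 * appears at sector 655360 of the DM device, with its write pointer
	 * shifted by the same amount.
	 */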
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;

	/* If we have an internal callback, call it first. */
	if (args->cb) {
		int ret;

		ret = args->cb(zone, args->zone_idx, args->data);
		if (ret)
			return ret;
	}

	return disk_report_zone(args->disk, zone, args->zone_idx++,
				args->rep_args);
}

/*
 * Helper for drivers of zoned targets to implement the struct target_type
 * report_zones operation.
 */
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones)
{
	/*
	 * Set the target mapping start sector first so that
	 * dm_report_zones_cb() can correctly remap zone information.
	 */
	args->start = start;

	return blkdev_report_zones(bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
EXPORT_SYMBOL_GPL(dm_report_zones);
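
/*
 * A target's report_zones method is typically a thin wrapper around
 * dm_report_zones(). A minimal sketch, assuming a hypothetical
 * linear-style target whose private context holds the underlying device
 * and the start sector of the mapped region:
 *
 *	static int example_report_zones(struct dm_target *ti,
 *			struct dm_report_zones_args *args,
 *			unsigned int nr_zones)
 *	{
 *		struct example_ctx *ctx = ti->private;
 *
 *		return dm_report_zones(ctx->dev->bdev, ctx->start,
 *				ctx->start + dm_target_offset(ti, args->next_sector),
 *				args, nr_zones);
 *	}
 */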
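/*
 * Check if a BIO is a regular zone write, that is, a write or a write
 * zeroes operation that is not a flush and actually transfers data.
 */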
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	struct request_queue *q = md->queue;

	if (!blk_queue_is_zoned(q))
		return false;

	switch (bio_op(bio)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
	default:
		return false;
	}
}

/*
 * Revalidate the zones of a mapped device to initialize the resources
 * necessary for zone append emulation. Note that we cannot simply use the
 * block layer blk_revalidate_disk_zones() function here as the mapped device
 * is suspended (this is called from __bind() context).
 */
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
	struct mapped_device *md = t->md;
	struct gendisk *disk = md->disk;
	unsigned int nr_zones = disk->nr_zones;
	int ret;

	if (!get_capacity(disk))
		return 0;

	/*
	 * Do not revalidate if zone write plug resources have already
	 * been allocated.
	 */
	if (dm_has_zone_plugs(md))
		return 0;

	DMINFO("%s using %s zone append", disk->disk_name,
	       queue_emulates_zone_append(q) ? "emulated" : "native");

	/*
	 * Our table is not live yet. So the call to dm_get_live_table()
	 * in dm_blk_report_zones() will fail. Set a temporary pointer to
	 * our table for dm_blk_report_zones() to use directly.
	 */
	md->zone_revalidate_map = t;
	md->revalidate_map_task = current;
	ret = blk_revalidate_disk_zones(disk);
	md->revalidate_map_task = NULL;
	md->zone_revalidate_map = NULL;

	if (ret) {
		DMERR("Revalidate zones failed %d", ret);
		disk->nr_zones = nr_zones;
		return ret;
	}

	return 0;
}

static int device_not_zone_append_capable(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	return !bdev_is_zoned(dev->bdev);
}

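/*
 * Check if all the targets of a table natively support zone append:
 * a target requesting emulation, or one mapping a device that is not
 * zoned, forces emulation for the whole mapped device.
 */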
static bool dm_table_supports_zone_append(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->emulate_zone_append)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_zone_append_capable, NULL))
			return false;
	}

	return true;
}

struct dm_device_zone_count {
	sector_t start;
	sector_t len;
	unsigned int total_nr_seq_zones;
	unsigned int target_nr_seq_zones;
};

/*
 * Count the total number of sequential zones of a target zoned device,
 * as well as the number of those zones that the target actually maps.
 */
static int dm_device_count_zones_cb(struct blk_zone *zone,
				    unsigned int idx, void *data)
{
	struct dm_device_zone_count *zc = data;

	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		zc->total_nr_seq_zones++;
		if (zone->start >= zc->start &&
		    zone->start < zc->start + zc->len)
			zc->target_nr_seq_zones++;
	}

	return 0;
}

static int dm_device_count_zones(struct dm_dev *dev,
				 struct dm_device_zone_count *zc)
{
	int ret;

	ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES,
				  dm_device_count_zones_cb, zc);
	if (ret < 0)
		return ret;
	if (!ret)
		return -EIO;
	return 0;
}

struct dm_zone_resource_limits {
	unsigned int mapped_nr_seq_zones;
	struct queue_limits *lim;
	bool reliable_limits;
};

static int device_get_zone_resource_limits(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	struct dm_zone_resource_limits *zlim = data;
	struct gendisk *disk = dev->bdev->bd_disk;
	unsigned int max_open_zones, max_active_zones;
	int ret;
	struct dm_device_zone_count zc = {
		.start = start,
		.len = len,
	};

	/*
	 * If the target is not the whole device, the device zone resources may
	 * be shared between different targets. Check this by counting the
	 * number of mapped sequential zones: if this number is smaller than the
	 * total number of sequential zones of the target device, then resource
	 * sharing may happen and the zone limits will not be reliable.
	 */
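	/*
	 * For example (hypothetical numbers): splitting a device with
	 * 1000 sequential zones between two targets of 500 zones each
	 * gives each target target_nr_seq_zones == 500 while
	 * total_nr_seq_zones == 1000, so both mappings may compete for
	 * the device open and active zone resources.
	 */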
	ret = dm_device_count_zones(dev, &zc);
	if (ret) {
		DMERR("Count %s zones failed %d", disk->disk_name, ret);
		return ret;
	}

	/*
	 * If the target does not map any sequential zones, then we do not need
	 * any zone resource limits.
	 */
	if (!zc.target_nr_seq_zones)
		return 0;

	/*
	 * If the target does not map all sequential zones, the limits
	 * will not be reliable and we cannot use REQ_OP_ZONE_RESET_ALL.
	 */
	if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) {
		zlim->reliable_limits = false;
		ti->zone_reset_all_supported = false;
	}

	/*
	 * If the target maps fewer sequential zones than a limit value, that
	 * limit can never be exceeded, so we do not need it for this target.
	 */
	max_active_zones = disk->queue->limits.max_active_zones;
	if (max_active_zones >= zc.target_nr_seq_zones)
		max_active_zones = 0;
	zlim->lim->max_active_zones =
		min_not_zero(max_active_zones, zlim->lim->max_active_zones);

	max_open_zones = disk->queue->limits.max_open_zones;
	if (max_open_zones >= zc.target_nr_seq_zones)
		max_open_zones = 0;
	zlim->lim->max_open_zones =
		min_not_zero(max_open_zones, zlim->lim->max_open_zones);

	/*
	 * Also count the total number of sequential zones for the mapped
	 * device so that when we are done inspecting all its targets, we are
	 * able to check if the mapped device actually has any sequential zones.
	 */
	zlim->mapped_nr_seq_zones += zc.target_nr_seq_zones;

	return 0;
}

int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *lim)
{
	struct mapped_device *md = t->md;
	struct gendisk *disk = md->disk;
	struct dm_zone_resource_limits zlim = {
		.reliable_limits = true,
		.lim = lim,
	};

	/*
	 * Check if zone append is natively supported, and if not, set the
	 * mapped device queue as needing zone append emulation. If zone
	 * append is natively supported, make sure that
	 * max_hw_zone_append_sectors is not set to 0.
	 */
	WARN_ON_ONCE(queue_is_mq(q));
	if (!dm_table_supports_zone_append(t))
		lim->max_hw_zone_append_sectors = 0;
	else if (lim->max_hw_zone_append_sectors == 0)
		lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;

	/*
	 * Determine the max open and max active zone limits for the mapped
	 * device by inspecting the zone resource limits and the zones mapped
	 * by each target.
	 */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * Assume that the target can accept REQ_OP_ZONE_RESET_ALL.
		 * device_get_zone_resource_limits() may adjust this if one of
		 * the devices used by the target does not have all its
		 * sequential write required zones mapped.
		 */
		ti->zone_reset_all_supported = true;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti,
				device_get_zone_resource_limits, &zlim)) {
			DMERR("Could not determine %s zone resource limits",
			      disk->disk_name);
			return -ENODEV;
		}
	}

	/*
	 * If we only have conventional zones mapped, expose the mapped device
	 * as a regular device.
	 */
	if (!zlim.mapped_nr_seq_zones) {
		lim->max_open_zones = 0;
		lim->max_active_zones = 0;
		lim->max_hw_zone_append_sectors = 0;
		lim->max_zone_append_sectors = 0;
		lim->zone_write_granularity = 0;
		lim->chunk_sectors = 0;
		lim->features &= ~BLK_FEAT_ZONED;
		return 0;
	}

	if (get_capacity(disk) && dm_has_zone_plugs(t->md)) {
		if (q->limits.chunk_sectors != lim->chunk_sectors) {
			DMWARN("%s: device has zone write plug resources. "
			       "Cannot change zone size",
			       disk->disk_name);
			return -EINVAL;
		}
		if (lim->max_hw_zone_append_sectors != 0 &&
		    !dm_table_is_wildcard(t)) {
			DMWARN("%s: device has zone write plug resources. "
			       "New table must emulate zone append",
			       disk->disk_name);
			return -EINVAL;
		}
	}

	/*
	 * Warn once (when the capacity is not yet set) if the mapped device is
	 * partially using zone resources of the target devices, as that leads
	 * to unreliable limits: if another mapped device uses the same
	 * underlying devices, we cannot enforce zone limits to guarantee that
	 * writing will not lead to errors. Note that we really should return
	 * an error in such a case, but there is no easy way to find out if
	 * another mapped device uses the same underlying zoned devices.
	 */
	if (!get_capacity(disk) && !zlim.reliable_limits)
		DMWARN("%s zone resource limits may be unreliable",
		       disk->disk_name);

	if (lim->features & BLK_FEAT_ZONED &&
	    !static_key_enabled(&zoned_enabled.key))
		static_branch_enable(&zoned_enabled);
	return 0;
}

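/*
 * Record whether the mapped device needs zone append emulation once the
 * final queue limits are known, and clear the zone information if the
 * device ends up not zoned.
 */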
void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
{
	struct mapped_device *md = t->md;

	if (lim->features & BLK_FEAT_ZONED) {
		if (dm_table_supports_zone_append(t))
			clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
		else
			set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	} else {
		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
		md->disk->nr_zones = 0;
	}
}

/*
 * IO completion callback called from clone_endio().
 */
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
	struct mapped_device *md = io->md;
	struct gendisk *disk = md->disk;
	struct bio *orig_bio = io->orig_bio;

	/*
	 * Get the offset within the zone of the written sector
	 * and add that to the original bio sector position.
	 */
	if (clone->bi_status == BLK_STS_OK &&
	    bio_op(clone) == REQ_OP_ZONE_APPEND) {
		orig_bio->bi_iter.bi_sector +=
			bdev_offset_from_zone_start(disk->part0,
						    clone->bi_iter.bi_sector);
	}
}

static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				 void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
			     sector_t sector, unsigned int nr_zones,
			     unsigned long *need_reset)
{
	struct dm_report_zones_args args = {
		.disk = md->disk,
		.next_sector = sector,
		.cb = dm_zone_need_reset_cb,
		.data = need_reset,
	};
	int ret;

	ret = dm_blk_do_report_zones(md, t, nr_zones, &args);
	if (ret != nr_zones) {
		DMERR("Get %s zone reset bitmap failed",
		      md->disk->disk_name);
		return -EIO;
	}

	return 0;
}
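
/*
 * A caller would typically size the bitmap to the number of zones being
 * inspected. A minimal sketch (hypothetical caller, error handling
 * elided):
 *
 *	unsigned long *need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
 *
 *	if (need_reset &&
 *	    !dm_zone_get_reset_bitmap(md, t, 0, nr_zones, need_reset)) {
 *		// issue a zone reset for each bit set in need_reset
 *	}
 *	bitmap_free(need_reset);
 */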