1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2023 Red Hat
4  */
5 
6 #include "data-vio.h"
7 
8 #include <linux/atomic.h>
9 #include <linux/bio.h>
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12 #include <linux/device-mapper.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/lz4.h>
17 #include <linux/minmax.h>
18 #include <linux/sched.h>
19 #include <linux/spinlock.h>
20 #include <linux/string.h>
21 #include <linux/wait.h>
22 
23 #include "logger.h"
24 #include "memory-alloc.h"
25 #include "murmurhash3.h"
26 #include "permassert.h"
27 
28 #include "block-map.h"
29 #include "dump.h"
30 #include "encodings.h"
31 #include "int-map.h"
32 #include "io-submitter.h"
33 #include "logical-zone.h"
34 #include "packer.h"
35 #include "recovery-journal.h"
36 #include "slab-depot.h"
37 #include "status-codes.h"
38 #include "types.h"
39 #include "vdo.h"
40 #include "vio.h"
41 #include "wait-queue.h"
42 
43 /**
44  * DOC: Bio flags.
45  *
46  * For certain flags set on user bios, if the user bio has not yet been acknowledged, setting those
47  * flags on our own bio(s) for that request may help underlying layers better fulfill the user
48  * bio's needs. This constant contains the aggregate of those flags; VDO strips all the other
49  * flags, as they convey incorrect information.
50  *
51  * These flags are only hints about IO importance, so they are irrelevant once VDO has already
52  * finished the user bio: any IO still outstanding at that point no longer needs to convey how
53  * important completing the already-finished bio was.
54  *
55  * Note that bio.c contains the complete list of flags we believe may be set; the following list
56  * explains the action taken with each of those flags VDO could receive:
57  *
58  * * REQ_SYNC: Passed down if the user bio is not yet completed, since it indicates the user bio
59  *   completion is required for further work to be done by the issuer.
60  * * REQ_META: Passed down if the user bio is not yet completed, since it may mean the lower layer
61  *   treats it as more urgent, similar to REQ_SYNC.
62  * * REQ_PRIO: Passed down if the user bio is not yet completed, since it indicates the user bio is
63  *   important.
64  * * REQ_NOMERGE: Set only if the incoming bio was split; irrelevant to VDO IO.
65  * * REQ_IDLE: Set if the incoming bio had more IO quickly following; VDO's IO pattern doesn't
66  *   match incoming IO, so this flag is incorrect for it.
67  * * REQ_FUA: Handled separately, and irrelevant to VDO IO otherwise.
68  * * REQ_RAHEAD: Passed down, as, for reads, it indicates trivial importance.
69  * * REQ_BACKGROUND: Not passed down, as VIOs are a limited resource and VDO needs them recycled
70  *   ASAP to service heavy load, which is the only place where REQ_BACKGROUND might aid in load
71  *   prioritization.
72  */
73 static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD);
74 
75 /**
76  * DOC:
77  *
78  * The data_vio_pool maintains the pool of data_vios which a vdo uses to service incoming bios. For
79  * correctness, and in order to avoid potentially expensive or blocking memory allocations during
80  * normal operation, the number of concurrently active data_vios is capped. Furthermore, in order
81  * to avoid starvation of reads and writes, at most 75% of the data_vios may be used for
82  * discards. The data_vio_pool is responsible for enforcing these limits. Threads submitting bios
83  * for which a data_vio or discard permit are not available will block until the necessary
84  * resources are available. The pool is also responsible for distributing resources to blocked
85  * threads and waking them. Finally, the pool attempts to batch the work of recycling data_vios by
86  * performing the work of actually assigning resources to blocked threads or placing data_vios back
87  * into the pool on a single cpu at a time.
88  *
89  * The pool contains two "limiters", one for tracking data_vios and one for tracking discard
90  * permits. The limiters also provide safe cross-thread access to pool statistics without the need
91  * to take the pool's lock. When a thread submits a bio to a vdo device, it will first attempt to
92  * get a discard permit if it is a discard, and then to get a data_vio. If the necessary resources
93  * are available, the incoming bio will be assigned to the acquired data_vio, and it will be
94  * launched. However, if either of these are unavailable, the arrival time of the bio is recorded
95  * in the bio's bi_private field, the bio and its submitter are both queued on the appropriate
96  * limiter, and the submitting thread will then put itself to sleep. (Note that this mechanism will
97  * break if jiffies are only 32 bits.)
98  *
99  * Whenever a data_vio has completed processing for the bio it was servicing, release_data_vio()
100  * will be called on it. This function will add the data_vio to a funnel queue, and then check the
101  * state of the pool. If the pool is not currently processing released data_vios, the pool's
102  * completion will be enqueued on a cpu queue. This obviates the need for the releasing threads to
103  * hold the pool's lock, and also batches release work while avoiding starvation of the cpu
104  * threads.
105  *
106  * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
107  * processes a batch of returned data_vios (currently at most 32) from the pool's funnel queue. For
108  * each data_vio, it first checks whether that data_vio was processing a discard. If so, and there
109  * is a blocked bio waiting for a discard permit, that permit is notionally transferred to the
110  * eldest discard waiter, and that waiter is moved to the end of the list of discard bios waiting
111  * for a data_vio. If there are no discard waiters, the discard permit is returned to the pool.
112  * Next, the data_vio is assigned to the oldest blocked bio which either has a discard permit or
113  * doesn't need one, and is relaunched. If no such bio exists, the data_vio is returned to the
114  * pool. Finally, if any waiting bios were launched, the threads which blocked trying to submit
115  * them are awakened.
116  */
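
/*
 * A rough sketch of the flow described above (illustrative only; the authoritative logic is in
 * vdo_launch_bio(), finish_cleanup(), and process_release_callback() below):
 *
 *   vdo_launch_bio(pool, bio)
 *     -> acquire_permit() on the discard limiter  [discards only; may wait_permit() and sleep]
 *     -> acquire_permit() on the data_vio limiter [may wait_permit() and sleep]
 *     -> launch_bio() on an available data_vio
 *
 *   finish_cleanup(data_vio)
 *     -> vdo_funnel_queue_put() + schedule_releases()
 *       -> process_release_callback(): acknowledge bios, reassign data_vios and discard
 *          permits to waiting bios, and wake the threads which blocked submitting them.
 */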
117 
118 #define DATA_VIO_RELEASE_BATCH_SIZE 128
119 
120 static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1;
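/*
 * A data_vio's compression status is packed into a single u32: the low byte holds the current
 * stage and the top bit records whether compression has been disallowed for the data_vio (see
 * pack_status() and get_data_vio_compression_status() below).
 */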
121 static const u32 COMPRESSION_STATUS_MASK = 0xff;
122 static const u32 MAY_NOT_COMPRESS_MASK = 0x80000000;
123 
124 struct limiter;
125 typedef void (*assigner_fn)(struct limiter *limiter);
126 
127 /* Bookkeeping structure for a single type of resource. */
128 struct limiter {
129 	/* The data_vio_pool to which this limiter belongs */
130 	struct data_vio_pool *pool;
131 	/* The maximum number of data_vios available */
132 	data_vio_count_t limit;
133 	/* The number of resources in use */
134 	data_vio_count_t busy;
135 	/* The maximum number of resources ever simultaneously in use */
136 	data_vio_count_t max_busy;
137 	/* The number of resources to release */
138 	data_vio_count_t release_count;
139 	/* The number of waiters to wake */
140 	data_vio_count_t wake_count;
141 	/* The list of waiting bios which are known to process_release_callback() */
142 	struct bio_list waiters;
143 	/* The list of waiting bios which are not yet known to process_release_callback() */
144 	struct bio_list new_waiters;
145 	/* The list of waiters which have their permits */
146 	struct bio_list *permitted_waiters;
147 	/* The function for assigning a resource to a waiter */
148 	assigner_fn assigner;
149 	/* The queue of blocked threads */
150 	wait_queue_head_t blocked_threads;
151 	/* The arrival time of the eldest waiter */
152 	u64 arrival;
153 };
154 
155 /*
156  * A data_vio_pool is a collection of preallocated data_vios which may be acquired from any thread,
157  * and are released in batches.
158  */
159 struct data_vio_pool {
160 	/* Completion for scheduling releases */
161 	struct vdo_completion completion;
162 	/* The administrative state of the pool */
163 	struct admin_state state;
164 	/* Lock protecting the pool */
165 	spinlock_t lock;
166 	/* The main limiter controlling the total data_vios in the pool. */
167 	struct limiter limiter;
168 	/* The limiter controlling data_vios for discard */
169 	struct limiter discard_limiter;
170 	/* The list of bios which have discard permits but still need a data_vio */
171 	struct bio_list permitted_discards;
172 	/* The list of available data_vios */
173 	struct list_head available;
174 	/* The queue of data_vios waiting to be returned to the pool */
175 	struct funnel_queue *queue;
176 	/* Whether the pool is processing, or scheduled to process releases */
177 	atomic_t processing;
178 	/* The data vios in the pool */
179 	struct data_vio data_vios[];
180 };
181 
182 static const char * const ASYNC_OPERATION_NAMES[] = {
183 	"launch",
184 	"acknowledge_write",
185 	"acquire_hash_lock",
186 	"attempt_logical_block_lock",
187 	"lock_duplicate_pbn",
188 	"check_for_duplication",
189 	"cleanup",
190 	"compress_data_vio",
191 	"find_block_map_slot",
192 	"get_mapped_block_for_read",
193 	"get_mapped_block_for_write",
194 	"hash_data_vio",
195 	"journal_remapping",
196 	"vdo_attempt_packing",
197 	"put_mapped_block",
198 	"read_data_vio",
199 	"update_dedupe_index",
200 	"update_reference_counts",
201 	"verify_duplication",
202 	"write_data_vio",
203 };
204 
205 /* The steps taken when cleaning up a data_vio, in the order they are performed. */
206 enum data_vio_cleanup_stage {
207 	VIO_CLEANUP_START,
208 	VIO_RELEASE_HASH_LOCK = VIO_CLEANUP_START,
209 	VIO_RELEASE_ALLOCATED,
210 	VIO_RELEASE_RECOVERY_LOCKS,
211 	VIO_RELEASE_LOGICAL,
212 	VIO_CLEANUP_DONE
213 };
214 
215 static inline struct data_vio_pool * __must_check
216 as_data_vio_pool(struct vdo_completion *completion)
217 {
218 	vdo_assert_completion_type(completion, VDO_DATA_VIO_POOL_COMPLETION);
219 	return container_of(completion, struct data_vio_pool, completion);
220 }
221 
222 static inline u64 get_arrival_time(struct bio *bio)
223 {
224 	return (u64) bio->bi_private;
225 }
226 
227 /**
228  * check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios
229  *				       or waiters while holding the pool's lock.
230  * @pool: The data_vio pool.
231  */
232 static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
233 {
234 	if (pool->limiter.busy > 0)
235 		return false;
236 
237 	VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
238 			    "no outstanding discard permits");
239 
240 	return (bio_list_empty(&pool->limiter.new_waiters) &&
241 		bio_list_empty(&pool->discard_limiter.new_waiters));
242 }
243 
244 static void initialize_lbn_lock(struct data_vio *data_vio, logical_block_number_t lbn)
245 {
246 	struct vdo *vdo = vdo_from_data_vio(data_vio);
247 	zone_count_t zone_number;
248 	struct lbn_lock *lock = &data_vio->logical;
249 
250 	lock->lbn = lbn;
251 	lock->locked = false;
252 	vdo_waitq_init(&lock->waiters);
253 	zone_number = vdo_compute_logical_zone(data_vio);
254 	lock->zone = &vdo->logical_zones->zones[zone_number];
255 }
256 
257 static void launch_locked_request(struct data_vio *data_vio)
258 {
259 	data_vio->logical.locked = true;
260 	if (data_vio->write) {
261 		struct vdo *vdo = vdo_from_data_vio(data_vio);
262 
263 		if (vdo_is_read_only(vdo)) {
264 			continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
265 			return;
266 		}
267 	}
268 
269 	data_vio->last_async_operation = VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT;
270 	vdo_find_block_map_slot(data_vio);
271 }
272 
273 static void acknowledge_data_vio(struct data_vio *data_vio)
274 {
275 	struct vdo *vdo = vdo_from_data_vio(data_vio);
276 	struct bio *bio = data_vio->user_bio;
277 	int error = vdo_status_to_errno(data_vio->vio.completion.result);
278 
279 	if (bio == NULL)
280 		return;
281 
282 	VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <=
283 			     (u32) (VDO_BLOCK_SIZE - data_vio->offset)),
284 			    "data_vio to acknowledge is not an incomplete discard");
285 
286 	data_vio->user_bio = NULL;
287 	vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
288 	if (data_vio->is_partial)
289 		vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio);
290 
291 	bio->bi_status = errno_to_blk_status(error);
292 	bio_endio(bio);
293 }
294 
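/* Copy data from a contiguous buffer into the user bio's pages, one segment at a time. */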
295 static void copy_to_bio(struct bio *bio, char *data_ptr)
296 {
297 	struct bio_vec biovec;
298 	struct bvec_iter iter;
299 
300 	bio_for_each_segment(biovec, bio, iter) {
301 		memcpy_to_bvec(&biovec, data_ptr);
302 		data_ptr += biovec.bv_len;
303 	}
304 }
305 
306 struct data_vio_compression_status get_data_vio_compression_status(struct data_vio *data_vio)
307 {
308 	u32 packed = atomic_read(&data_vio->compression.status);
309 
310 	/* pairs with cmpxchg in set_data_vio_compression_status */
311 	smp_rmb();
312 	return (struct data_vio_compression_status) {
313 		.stage = packed & COMPRESSION_STATUS_MASK,
314 		.may_not_compress = ((packed & MAY_NOT_COMPRESS_MASK) != 0),
315 	};
316 }
317 
318 /**
319  * pack_status() - Convert a data_vio_compression_status into a u32 which may be stored
320  *                 atomically.
321  * @status: The state to convert.
322  *
323  * Return: The compression state packed into a u32.
324  */
325 static u32 __must_check pack_status(struct data_vio_compression_status status)
326 {
327 	return status.stage | (status.may_not_compress ? MAY_NOT_COMPRESS_MASK : 0);
328 }
329 
330 /**
331  * set_data_vio_compression_status() - Set the compression status of a data_vio.
332  * @data_vio: The data_vio to change.
333  * @status: The expected current status of the data_vio.
334  * @new_status: The status to set.
335  *
336  * Return: true if the new status was set, false if the data_vio's compression status did not
337  *         match the expected state, and so was left unchanged.
338  */
339 static bool __must_check
340 set_data_vio_compression_status(struct data_vio *data_vio,
341 				struct data_vio_compression_status status,
342 				struct data_vio_compression_status new_status)
343 {
344 	u32 actual;
345 	u32 expected = pack_status(status);
346 	u32 replacement = pack_status(new_status);
347 
348 	/*
349 	 * Extra barriers because this was originally developed using a CAS operation that implicitly
350 	 * had them.
351 	 */
352 	smp_mb__before_atomic();
353 	actual = atomic_cmpxchg(&data_vio->compression.status, expected, replacement);
354 	/* same as before_atomic */
355 	smp_mb__after_atomic();
356 	return (expected == actual);
357 }
358 
359 struct data_vio_compression_status advance_data_vio_compression_stage(struct data_vio *data_vio)
360 {
361 	for (;;) {
362 		struct data_vio_compression_status status =
363 			get_data_vio_compression_status(data_vio);
364 		struct data_vio_compression_status new_status = status;
365 
366 		if (status.stage == DATA_VIO_POST_PACKER) {
367 			/* We're already in the last stage. */
368 			return status;
369 		}
370 
371 		if (status.may_not_compress) {
372 			/*
373 			 * Compression has been disallowed for this VIO, so skip the rest of the
374 			 * path and go to the end.
375 			 */
376 			new_status.stage = DATA_VIO_POST_PACKER;
377 		} else {
378 			/* Go to the next state. */
379 			new_status.stage++;
380 		}
381 
382 		if (set_data_vio_compression_status(data_vio, status, new_status))
383 			return new_status;
384 
385 		/* Another thread changed the status out from under us so try again. */
386 	}
387 }
388 
389 /**
390  * cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed.
391  * @data_vio: The data_vio.
392  *
393  * Return: true if the data_vio is in the packer and the caller was the first caller to cancel it.
394  */
395 bool cancel_data_vio_compression(struct data_vio *data_vio)
396 {
397 	struct data_vio_compression_status status, new_status;
398 
399 	for (;;) {
400 		status = get_data_vio_compression_status(data_vio);
401 		if (status.may_not_compress || (status.stage == DATA_VIO_POST_PACKER)) {
402 			/* This data_vio is already set up to not block in the packer. */
403 			break;
404 		}
405 
406 		new_status.stage = status.stage;
407 		new_status.may_not_compress = true;
408 
409 		if (set_data_vio_compression_status(data_vio, status, new_status))
410 			break;
411 	}
412 
413 	return ((status.stage == DATA_VIO_PACKING) && !status.may_not_compress);
414 }
415 
416 /**
417  * attempt_logical_block_lock() - Attempt to acquire the lock on a logical block.
418  * @completion: The data_vio for an external data request as a completion.
419  *
420  * This is the start of the path for all external requests. It is registered in launch_data_vio().
421  */
422 static void attempt_logical_block_lock(struct vdo_completion *completion)
423 {
424 	struct data_vio *data_vio = as_data_vio(completion);
425 	struct lbn_lock *lock = &data_vio->logical;
426 	struct vdo *vdo = vdo_from_data_vio(data_vio);
427 	struct data_vio *lock_holder;
428 	int result;
429 
430 	assert_data_vio_in_logical_zone(data_vio);
431 
432 	if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) {
433 		continue_data_vio_with_error(data_vio, VDO_OUT_OF_RANGE);
434 		return;
435 	}
436 
437 	result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
438 				 data_vio, false, (void **) &lock_holder);
439 	if (result != VDO_SUCCESS) {
440 		continue_data_vio_with_error(data_vio, result);
441 		return;
442 	}
443 
444 	if (lock_holder == NULL) {
445 		/* We got the lock */
446 		launch_locked_request(data_vio);
447 		return;
448 	}
449 
450 	result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
451 	if (result != VDO_SUCCESS) {
452 		continue_data_vio_with_error(data_vio, result);
453 		return;
454 	}
455 
456 	/*
457 	 * If the new request is a pure read request (not read-modify-write) and the lock_holder is
458 	 * writing and has received an allocation, service the read request immediately by copying
459 	 * data from the lock_holder to avoid having to flush the write out of the packer just to
460 	 * prevent the read from waiting indefinitely. If the lock_holder does not yet have an
461 	 * allocation, prevent it from blocking in the packer and wait on it. This is necessary in
462 	 * order to prevent returning data that may not have actually been written.
463 	 */
464 	if (!data_vio->write && READ_ONCE(lock_holder->allocation_succeeded)) {
465 		copy_to_bio(data_vio->user_bio, lock_holder->vio.data + data_vio->offset);
466 		acknowledge_data_vio(data_vio);
467 		complete_data_vio(completion);
468 		return;
469 	}
470 
471 	data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK;
472 	vdo_waitq_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter);
473 
474 	/*
475 	 * Prevent writes and read-modify-writes from blocking indefinitely on lock holders in the
476 	 * packer.
477 	 */
478 	if (lock_holder->write && cancel_data_vio_compression(lock_holder)) {
479 		data_vio->compression.lock_holder = lock_holder;
480 		launch_data_vio_packer_callback(data_vio,
481 						vdo_remove_lock_holder_from_packer);
482 	}
483 }
484 
485 /**
486  * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the
487  *		       same parent and other state and send it on its way.
488  * @data_vio: The data_vio to launch.
489  * @lbn: The logical block number.
490  */
491 static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn)
492 {
493 	struct vdo_completion *completion = &data_vio->vio.completion;
494 
495 	/*
496 	 * Clearing the tree lock must happen before initializing the LBN lock, which also adds
497 	 * information to the tree lock.
498 	 */
499 	memset(&data_vio->tree_lock, 0, sizeof(data_vio->tree_lock));
500 	initialize_lbn_lock(data_vio, lbn);
501 	INIT_LIST_HEAD(&data_vio->hash_lock_entry);
502 	INIT_LIST_HEAD(&data_vio->write_entry);
503 
504 	memset(&data_vio->allocation, 0, sizeof(data_vio->allocation));
505 
506 	data_vio->is_duplicate = false;
507 
508 	memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
509 	memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
510 	vdo_reset_completion(&data_vio->decrement_completion);
511 	vdo_reset_completion(completion);
512 	completion->error_handler = handle_data_vio_error;
513 	set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
514 	vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
515 }
516 
517 static void copy_from_bio(struct bio *bio, char *data_ptr)
518 {
519 	struct bio_vec biovec;
520 	struct bvec_iter iter;
521 
522 	bio_for_each_segment(biovec, bio, iter) {
523 		memcpy_from_bvec(data_ptr, &biovec);
524 		data_ptr += biovec.bv_len;
525 	}
526 }
527 
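/*
 * Set up a data_vio to service a user bio: record the offset and the partial/discard/read/write
 * nature of the request, copy full-block write data out of the bio, and launch the data_vio at
 * the bio's logical block.
 */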
528 static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)
529 {
530 	logical_block_number_t lbn;
531 	/*
532 	 * Zero out the fields which don't need to be preserved (i.e. which are not pointers to
533 	 * separately allocated objects).
534 	 */
535 	memset(data_vio, 0, offsetof(struct data_vio, vio));
536 	memset(&data_vio->compression, 0, offsetof(struct compression_state, block));
537 
538 	data_vio->user_bio = bio;
539 	data_vio->offset = to_bytes(bio->bi_iter.bi_sector & VDO_SECTORS_PER_BLOCK_MASK);
540 	data_vio->is_partial = (bio->bi_iter.bi_size < VDO_BLOCK_SIZE) || (data_vio->offset != 0);
541 
542 	/*
543 	 * Discards behave very differently than other requests when coming in from device-mapper.
544 	 * We have to be able to handle any size discards and various sector offsets within a
545 	 * block.
546 	 */
547 	if (bio_op(bio) == REQ_OP_DISCARD) {
548 		data_vio->remaining_discard = bio->bi_iter.bi_size;
549 		data_vio->write = true;
550 		data_vio->is_discard = true;
551 		if (data_vio->is_partial) {
552 			vdo_count_bios(&vdo->stats.bios_in_partial, bio);
553 			data_vio->read = true;
554 		}
555 	} else if (data_vio->is_partial) {
556 		vdo_count_bios(&vdo->stats.bios_in_partial, bio);
557 		data_vio->read = true;
558 		if (bio_data_dir(bio) == WRITE)
559 			data_vio->write = true;
560 	} else if (bio_data_dir(bio) == READ) {
561 		data_vio->read = true;
562 	} else {
563 		/*
564 		 * Copy the bio data to a char array so that we can continue to use the data after
565 		 * we acknowledge the bio.
566 		 */
567 		copy_from_bio(bio, data_vio->vio.data);
568 		data_vio->is_zero = mem_is_zero(data_vio->vio.data, VDO_BLOCK_SIZE);
569 		data_vio->write = true;
570 	}
571 
572 	if (data_vio->user_bio->bi_opf & REQ_FUA)
573 		data_vio->fua = true;
574 
575 	lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK;
576 	launch_data_vio(data_vio, lbn);
577 }
578 
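/*
 * Launch the eldest permitted waiter on the given data_vio, count its submitting thread for
 * wakeup, and advance the limiter's eldest-arrival time to the next waiter (if any).
 */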
579 static void assign_data_vio(struct limiter *limiter, struct data_vio *data_vio)
580 {
581 	struct bio *bio = bio_list_pop(limiter->permitted_waiters);
582 
583 	launch_bio(limiter->pool->completion.vdo, data_vio, bio);
584 	limiter->wake_count++;
585 
586 	bio = bio_list_peek(limiter->permitted_waiters);
587 	limiter->arrival = ((bio == NULL) ? U64_MAX : get_arrival_time(bio));
588 }
589 
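/* Transfer a discard permit to the eldest waiting discard and move it to the permitted list. */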
590 static void assign_discard_permit(struct limiter *limiter)
591 {
592 	struct bio *bio = bio_list_pop(&limiter->waiters);
593 
594 	if (limiter->arrival == U64_MAX)
595 		limiter->arrival = get_arrival_time(bio);
596 
597 	bio_list_add(limiter->permitted_waiters, bio);
598 }
599 
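/* Merge any newly arrived waiters into the list known to process_release_callback(). */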
600 static void get_waiters(struct limiter *limiter)
601 {
602 	bio_list_merge_init(&limiter->waiters, &limiter->new_waiters);
603 }
604 
605 static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
606 {
607 	struct data_vio *data_vio =
608 		list_first_entry(&pool->available, struct data_vio, pool_entry);
609 
610 	list_del_init(&data_vio->pool_entry);
611 	return data_vio;
612 }
613 
614 static void assign_data_vio_to_waiter(struct limiter *limiter)
615 {
616 	assign_data_vio(limiter, get_available_data_vio(limiter->pool));
617 }
618 
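/**
 * update_limiter() - Apply pending releases: hand resources to waiters where possible and
 *                    update the limiter's busy and max_busy counts.
 * @limiter: The limiter to update.
 *
 * Called with the pool's lock held.
 */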
619 static void update_limiter(struct limiter *limiter)
620 {
621 	struct bio_list *waiters = &limiter->waiters;
622 	data_vio_count_t available = limiter->limit - limiter->busy;
623 
624 	VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
625 			    "Release count %u is not more than busy count %u",
626 			    limiter->release_count, limiter->busy);
627 
628 	get_waiters(limiter);
629 	for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--)
630 		limiter->assigner(limiter);
631 
632 	if (limiter->release_count > 0) {
633 		WRITE_ONCE(limiter->busy, limiter->busy - limiter->release_count);
634 		limiter->release_count = 0;
635 		return;
636 	}
637 
638 	for (; (available > 0) && !bio_list_empty(waiters); available--)
639 		limiter->assigner(limiter);
640 
641 	WRITE_ONCE(limiter->busy, limiter->limit - available);
642 	if (limiter->max_busy < limiter->busy)
643 		WRITE_ONCE(limiter->max_busy, limiter->busy);
644 }
645 
646 /**
647  * schedule_releases() - Ensure that release processing is scheduled.
648  * @pool: The data_vio pool.
649  *
650  * If this call switches the state to processing, enqueue. Otherwise, some other thread has already
651  * done so.
652  */
653 static void schedule_releases(struct data_vio_pool *pool)
654 {
655 	/* Pairs with the barrier in process_release_callback(). */
656 	smp_mb__before_atomic();
657 	if (atomic_cmpxchg(&pool->processing, false, true))
658 		return;
659 
660 	pool->completion.requeue = true;
661 	vdo_launch_completion_with_priority(&pool->completion,
662 					    CPU_Q_COMPLETE_VIO_PRIORITY);
663 }
664 
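/*
 * Recycle a released data_vio: return or reassign its discard permit, then either relaunch the
 * data_vio for the eldest waiting bio (whichever limiter's waiter is older) or place it on the
 * returned list to go back into the pool.
 */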
665 static void reuse_or_release_resources(struct data_vio_pool *pool,
666 				       struct data_vio *data_vio,
667 				       struct list_head *returned)
668 {
669 	if (data_vio->remaining_discard > 0) {
670 		if (bio_list_empty(&pool->discard_limiter.waiters)) {
671 			/* Return the data_vio's discard permit. */
672 			pool->discard_limiter.release_count++;
673 		} else {
674 			assign_discard_permit(&pool->discard_limiter);
675 		}
676 	}
677 
678 	if (pool->limiter.arrival < pool->discard_limiter.arrival) {
679 		assign_data_vio(&pool->limiter, data_vio);
680 	} else if (pool->discard_limiter.arrival < U64_MAX) {
681 		assign_data_vio(&pool->discard_limiter, data_vio);
682 	} else {
683 		list_add(&data_vio->pool_entry, returned);
684 		pool->limiter.release_count++;
685 	}
686 }
687 
688 /**
689  * process_release_callback() - Process a batch of data_vio releases.
690  * @completion: The pool with data_vios to release.
691  */
692 static void process_release_callback(struct vdo_completion *completion)
693 {
694 	struct data_vio_pool *pool = as_data_vio_pool(completion);
695 	bool reschedule;
696 	bool drained;
697 	data_vio_count_t processed;
698 	data_vio_count_t to_wake;
699 	data_vio_count_t discards_to_wake;
700 	LIST_HEAD(returned);
701 
702 	spin_lock(&pool->lock);
703 	get_waiters(&pool->discard_limiter);
704 	get_waiters(&pool->limiter);
705 	spin_unlock(&pool->lock);
706 
707 	if (pool->limiter.arrival == U64_MAX) {
708 		struct bio *bio = bio_list_peek(&pool->limiter.waiters);
709 
710 		if (bio != NULL)
711 			pool->limiter.arrival = get_arrival_time(bio);
712 	}
713 
714 	for (processed = 0; processed < DATA_VIO_RELEASE_BATCH_SIZE; processed++) {
715 		struct data_vio *data_vio;
716 		struct funnel_queue_entry *entry = vdo_funnel_queue_poll(pool->queue);
717 
718 		if (entry == NULL)
719 			break;
720 
721 		data_vio = as_data_vio(container_of(entry, struct vdo_completion,
722 						    work_queue_entry_link));
723 		acknowledge_data_vio(data_vio);
724 		reuse_or_release_resources(pool, data_vio, &returned);
725 	}
726 
727 	spin_lock(&pool->lock);
728 	/*
729 	 * There is a race where waiters could be added while we are in the unlocked section above.
730 	 * Those waiters could not see the resources we are now about to release, so we assign
731 	 * those resources now as we have no guarantee of being rescheduled. This is handled in
732 	 * update_limiter().
733 	 */
734 	update_limiter(&pool->discard_limiter);
735 	list_splice(&returned, &pool->available);
736 	update_limiter(&pool->limiter);
737 	to_wake = pool->limiter.wake_count;
738 	pool->limiter.wake_count = 0;
739 	discards_to_wake = pool->discard_limiter.wake_count;
740 	pool->discard_limiter.wake_count = 0;
741 
742 	atomic_set(&pool->processing, false);
743 	/* Pairs with the barrier in schedule_releases(). */
744 	smp_mb();
745 
746 	reschedule = !vdo_is_funnel_queue_empty(pool->queue);
747 	drained = (!reschedule &&
748 		   vdo_is_state_draining(&pool->state) &&
749 		   check_for_drain_complete_locked(pool));
750 	spin_unlock(&pool->lock);
751 
752 	if (to_wake > 0)
753 		wake_up_nr(&pool->limiter.blocked_threads, to_wake);
754 
755 	if (discards_to_wake > 0)
756 		wake_up_nr(&pool->discard_limiter.blocked_threads, discards_to_wake);
757 
758 	if (reschedule)
759 		schedule_releases(pool);
760 	else if (drained)
761 		vdo_finish_draining(&pool->state);
762 }
763 
764 static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *pool,
765 			       assigner_fn assigner, data_vio_count_t limit)
766 {
767 	limiter->pool = pool;
768 	limiter->assigner = assigner;
769 	limiter->limit = limit;
770 	limiter->arrival = U64_MAX;
771 	init_waitqueue_head(&limiter->blocked_threads);
772 }
773 
774 /**
775  * initialize_data_vio() - Allocate the components of a data_vio.
776  * @data_vio: The data_vio to initialize.
777  * @vdo: The vdo containing the data_vio.
778  *
779  * The caller is responsible for cleaning up the data_vio on error.
780  *
781  * Return: VDO_SUCCESS or an error.
782  */
783 static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
784 {
785 	struct bio *bio;
786 	int result;
787 
788 	BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE);
789 	result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
790 				     &data_vio->vio.data);
791 	if (result != VDO_SUCCESS)
792 		return vdo_log_error_strerror(result,
793 					      "data_vio data allocation failure");
794 
795 	result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
796 				     &data_vio->compression.block);
797 	if (result != VDO_SUCCESS) {
798 		return vdo_log_error_strerror(result,
799 					      "data_vio compressed block allocation failure");
800 	}
801 
802 	result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
803 				     &data_vio->scratch_block);
804 	if (result != VDO_SUCCESS)
805 		return vdo_log_error_strerror(result,
806 					      "data_vio scratch allocation failure");
807 
808 	result = vdo_create_bio(&bio);
809 	if (result != VDO_SUCCESS)
810 		return vdo_log_error_strerror(result,
811 					      "data_vio data bio allocation failure");
812 
813 	vdo_initialize_completion(&data_vio->decrement_completion, vdo,
814 				  VDO_DECREMENT_COMPLETION);
815 	initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo);
816 
817 	return VDO_SUCCESS;
818 }
819 
820 static void destroy_data_vio(struct data_vio *data_vio)
821 {
822 	if (data_vio == NULL)
823 		return;
824 
825 	vdo_free_bio(vdo_forget(data_vio->vio.bio));
826 	vdo_free(vdo_forget(data_vio->vio.data));
827 	vdo_free(vdo_forget(data_vio->compression.block));
828 	vdo_free(vdo_forget(data_vio->scratch_block));
829 }
830 
831 /**
832  * make_data_vio_pool() - Initialize a data_vio pool.
833  * @vdo: The vdo to which the pool will belong.
834  * @pool_size: The number of data_vios in the pool.
835  * @discard_limit: The maximum number of data_vios which may be used for discards.
836  * @pool_ptr: A pointer to hold the newly allocated pool.
837  */
838 int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
839 		       data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr)
840 {
841 	int result;
842 	struct data_vio_pool *pool;
843 	data_vio_count_t i;
844 
845 	result = vdo_allocate_extended(pool_size, data_vios, __func__, &pool);
846 	if (result != VDO_SUCCESS)
847 		return result;
848 
849 	VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size),
850 			    "discard limit does not exceed pool size");
851 	initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
852 			   discard_limit);
853 	pool->discard_limiter.permitted_waiters = &pool->permitted_discards;
854 	initialize_limiter(&pool->limiter, pool, assign_data_vio_to_waiter, pool_size);
855 	pool->limiter.permitted_waiters = &pool->limiter.waiters;
856 	INIT_LIST_HEAD(&pool->available);
857 	spin_lock_init(&pool->lock);
858 	vdo_set_admin_state_code(&pool->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
859 	vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
860 	vdo_prepare_completion(&pool->completion, process_release_callback,
861 			       process_release_callback, vdo->thread_config.cpu_thread,
862 			       NULL);
863 
864 	result = vdo_make_funnel_queue(&pool->queue);
865 	if (result != VDO_SUCCESS) {
866 		free_data_vio_pool(vdo_forget(pool));
867 		return result;
868 	}
869 
870 	for (i = 0; i < pool_size; i++) {
871 		struct data_vio *data_vio = &pool->data_vios[i];
872 
873 		result = initialize_data_vio(data_vio, vdo);
874 		if (result != VDO_SUCCESS) {
875 			destroy_data_vio(data_vio);
876 			free_data_vio_pool(pool);
877 			return result;
878 		}
879 
880 		list_add(&data_vio->pool_entry, &pool->available);
881 	}
882 
883 	*pool_ptr = pool;
884 	return VDO_SUCCESS;
885 }
886 
887 /**
888  * free_data_vio_pool() - Free a data_vio_pool and the data_vios in it.
889  * @pool: The data_vio pool to free.
890  *
891  * All data_vios must be returned to the pool before calling this function.
892  */
893 void free_data_vio_pool(struct data_vio_pool *pool)
894 {
895 	struct data_vio *data_vio, *tmp;
896 
897 	if (pool == NULL)
898 		return;
899 
900 	/*
901 	 * Pairs with the barrier in process_release_callback(). Possibly not needed since it
902 	 * caters to an enqueue vs. free race.
903 	 */
904 	smp_mb();
905 	BUG_ON(atomic_read(&pool->processing));
906 
907 	spin_lock(&pool->lock);
908 	VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
909 			    "data_vio pool must not have %u busy entries when being freed",
910 			    pool->limiter.busy);
911 	VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
912 			     bio_list_empty(&pool->limiter.new_waiters)),
913 			    "data_vio pool must not have threads waiting to read or write when being freed");
914 	VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
915 			     bio_list_empty(&pool->discard_limiter.new_waiters)),
916 			    "data_vio pool must not have threads waiting to discard when being freed");
917 	spin_unlock(&pool->lock);
918 
919 	list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) {
920 		list_del_init(&data_vio->pool_entry);
921 		destroy_data_vio(data_vio);
922 	}
923 
924 	vdo_free_funnel_queue(vdo_forget(pool->queue));
925 	vdo_free(pool);
926 }
927 
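/* Reserve one resource from the limiter, if any are free. Called with the pool's lock held. */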
928 static bool acquire_permit(struct limiter *limiter)
929 {
930 	if (limiter->busy >= limiter->limit)
931 		return false;
932 
933 	WRITE_ONCE(limiter->busy, limiter->busy + 1);
934 	if (limiter->max_busy < limiter->busy)
935 		WRITE_ONCE(limiter->max_busy, limiter->busy);
936 	return true;
937 }
938 
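/*
 * Queue the bio as a new waiter and put the submitting thread to sleep until a resource is
 * assigned to it; drops the pool's lock before sleeping.
 */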
939 static void wait_permit(struct limiter *limiter, struct bio *bio)
940 	__releases(&limiter->pool->lock)
941 {
942 	DEFINE_WAIT(wait);
943 
944 	bio_list_add(&limiter->new_waiters, bio);
945 	prepare_to_wait_exclusive(&limiter->blocked_threads, &wait,
946 				  TASK_UNINTERRUPTIBLE);
947 	spin_unlock(&limiter->pool->lock);
948 	io_schedule();
949 	finish_wait(&limiter->blocked_threads, &wait);
950 }
951 
952 /**
953  * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it.
954  * @pool: The data_vio pool.
955  * @bio: The bio to launch.
956  *
957  * This will block if data_vios or discard permits are not available.
958  */
959 void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
960 {
961 	struct data_vio *data_vio;
962 
963 	VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
964 			    "data_vio_pool not quiescent on acquire");
965 
966 	bio->bi_private = (void *) jiffies;
967 	spin_lock(&pool->lock);
968 	if ((bio_op(bio) == REQ_OP_DISCARD) &&
969 	    !acquire_permit(&pool->discard_limiter)) {
970 		wait_permit(&pool->discard_limiter, bio);
971 		return;
972 	}
973 
974 	if (!acquire_permit(&pool->limiter)) {
975 		wait_permit(&pool->limiter, bio);
976 		return;
977 	}
978 
979 	data_vio = get_available_data_vio(pool);
980 	spin_unlock(&pool->lock);
981 	launch_bio(pool->completion.vdo, data_vio, bio);
982 }
983 
984 /* Implements vdo_admin_initiator_fn. */
985 static void initiate_drain(struct admin_state *state)
986 {
987 	bool drained;
988 	struct data_vio_pool *pool = container_of(state, struct data_vio_pool, state);
989 
990 	spin_lock(&pool->lock);
991 	drained = check_for_drain_complete_locked(pool);
992 	spin_unlock(&pool->lock);
993 
994 	if (drained)
995 		vdo_finish_draining(state);
996 }
997 
998 static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
999 {
1000 	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
1001 			    "%s called on cpu thread", name);
1002 }
1003 
1004 /**
1005  * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool.
1006  * @pool: The data_vio pool.
1007  * @completion: The completion to notify when the pool has drained.
1008  */
1009 void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1010 {
1011 	assert_on_vdo_cpu_thread(completion->vdo, __func__);
1012 	vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
1013 			   initiate_drain);
1014 }
1015 
1016 /**
1017  * resume_data_vio_pool() - Resume a data_vio pool.
1018  * @pool: The data_vio pool.
1019  * @completion: The completion to notify when the pool has resumed.
1020  */
1021 void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1022 {
1023 	assert_on_vdo_cpu_thread(completion->vdo, __func__);
1024 	vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
1025 }
1026 
1027 static void dump_limiter(const char *name, struct limiter *limiter)
1028 {
1029 	vdo_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy,
1030 		     limiter->limit, limiter->max_busy,
1031 		     ((bio_list_empty(&limiter->waiters) &&
1032 		       bio_list_empty(&limiter->new_waiters)) ?
1033 		      "no waiters" : "has waiters"));
1034 }
1035 
1036 /**
1037  * dump_data_vio_pool() - Dump a data_vio pool to the log.
1038  * @pool: The data_vio pool.
1039  * @dump_vios: Whether to dump the details of each busy data_vio as well.
1040  */
1041 void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
1042 {
1043 	/*
1044 	 * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the
1045 	 * second clock tick).  These numbers were picked based on experiments with lab machines.
1046 	 */
1047 	static const int ELEMENTS_PER_BATCH = 35;
1048 	static const int SLEEP_FOR_SYSLOG = 4000;
1049 
1050 	if (pool == NULL)
1051 		return;
1052 
1053 	spin_lock(&pool->lock);
1054 	dump_limiter("data_vios", &pool->limiter);
1055 	dump_limiter("discard permits", &pool->discard_limiter);
1056 	if (dump_vios) {
1057 		int i;
1058 		int dumped = 0;
1059 
1060 		for (i = 0; i < pool->limiter.limit; i++) {
1061 			struct data_vio *data_vio = &pool->data_vios[i];
1062 
1063 			if (!list_empty(&data_vio->pool_entry))
1064 				continue;
1065 
1066 			dump_data_vio(data_vio);
1067 			if (++dumped >= ELEMENTS_PER_BATCH) {
1068 				spin_unlock(&pool->lock);
1069 				dumped = 0;
1070 				fsleep(SLEEP_FOR_SYSLOG);
1071 				spin_lock(&pool->lock);
1072 			}
1073 		}
1074 	}
1075 
1076 	spin_unlock(&pool->lock);
1077 }
1078 
1079 data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool)
1080 {
1081 	return READ_ONCE(pool->limiter.busy);
1082 }
1083 
1084 data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool)
1085 {
1086 	return READ_ONCE(pool->limiter.limit);
1087 }
1088 
1089 data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool)
1090 {
1091 	return READ_ONCE(pool->limiter.max_busy);
1092 }
1093 
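/*
 * Log an error for a data_vio, naming its operation by treating the read, write, and fua flags
 * as a bitmask (read = 1, write = 2, fua = 4) indexing the table below.
 */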
1094 static void update_data_vio_error_stats(struct data_vio *data_vio)
1095 {
1096 	u8 index = 0;
1097 	static const char * const operations[] = {
1098 		[0] = "empty",
1099 		[1] = "read",
1100 		[2] = "write",
1101 		[3] = "read-modify-write",
1102 		[5] = "read+fua",
1103 		[6] = "write+fua",
1104 		[7] = "read-modify-write+fua",
1105 	};
1106 
1107 	if (data_vio->read)
1108 		index = 1;
1109 
1110 	if (data_vio->write)
1111 		index += 2;
1112 
1113 	if (data_vio->fua)
1114 		index += 4;
1115 
1116 	update_vio_error_stats(&data_vio->vio,
1117 			       "Completing %s vio for LBN %llu with error after %s",
1118 			       operations[index],
1119 			       (unsigned long long) data_vio->logical.lbn,
1120 			       get_data_vio_operation_name(data_vio));
1121 }
1122 
1123 static void perform_cleanup_stage(struct data_vio *data_vio,
1124 				  enum data_vio_cleanup_stage stage);
1125 
1126 /**
1127  * release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at
1128  *			      the end of processing a data_vio.
1129  * @completion: The data_vio holding the lock.
1130  */
1131 static void release_allocated_lock(struct vdo_completion *completion)
1132 {
1133 	struct data_vio *data_vio = as_data_vio(completion);
1134 
1135 	assert_data_vio_in_allocated_zone(data_vio);
1136 	release_data_vio_allocation_lock(data_vio, false);
1137 	perform_cleanup_stage(data_vio, VIO_RELEASE_RECOVERY_LOCKS);
1138 }
1139 
1140 /** release_lock() - Release an uncontended LBN lock. */
1141 static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock)
1142 {
1143 	struct int_map *lock_map = lock->zone->lbn_operations;
1144 	struct data_vio *lock_holder;
1145 
1146 	if (!lock->locked) {
1147 		/*  The lock is not locked, so it had better not be registered in the lock map. */
1148 		struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn);
1149 
1150 		VDO_ASSERT_LOG_ONLY((data_vio != lock_holder),
1151 				    "no logical block lock held for block %llu",
1152 				    (unsigned long long) lock->lbn);
1153 		return;
1154 	}
1155 
1156 	/* Release the lock by removing the lock from the map. */
1157 	lock_holder = vdo_int_map_remove(lock_map, lock->lbn);
1158 	VDO_ASSERT_LOG_ONLY((data_vio == lock_holder),
1159 			    "logical block lock mismatch for block %llu",
1160 			    (unsigned long long) lock->lbn);
1161 	lock->locked = false;
1162 }
1163 
1164 /** transfer_lock() - Transfer a contended LBN lock to the eldest waiter. */
1165 static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
1166 {
1167 	struct data_vio *lock_holder, *next_lock_holder;
1168 	int result;
1169 
1170 	VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
1171 
1172 	/* Another data_vio is waiting for the lock, so transfer it in a single lock map operation. */
1173 	next_lock_holder =
1174 		vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&lock->waiters));
1175 
1176 	/* Transfer the remaining lock waiters to the next lock holder. */
1177 	vdo_waitq_transfer_all_waiters(&lock->waiters,
1178 				       &next_lock_holder->logical.waiters);
1179 
1180 	result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
1181 				 next_lock_holder, true, (void **) &lock_holder);
1182 	if (result != VDO_SUCCESS) {
1183 		continue_data_vio_with_error(next_lock_holder, result);
1184 		return;
1185 	}
1186 
1187 	VDO_ASSERT_LOG_ONLY((lock_holder == data_vio),
1188 			    "logical block lock mismatch for block %llu",
1189 			    (unsigned long long) lock->lbn);
1190 	lock->locked = false;
1191 
1192 	/*
1193 	 * If there are still waiters, other data_vios must be trying to get the lock we just
1194 	 * transferred. We must ensure that the new lock holder doesn't block in the packer.
1195 	 */
1196 	if (vdo_waitq_has_waiters(&next_lock_holder->logical.waiters))
1197 		cancel_data_vio_compression(next_lock_holder);
1198 
1199 	/*
1200 	 * Avoid stack overflow on lock transfer.
1201 	 * FIXME: this is only an issue in the 1 thread config.
1202 	 */
1203 	next_lock_holder->vio.completion.requeue = true;
1204 	launch_locked_request(next_lock_holder);
1205 }
1206 
1207 /**
1208  * release_logical_lock() - Release the logical block lock and flush generation lock at the end of
1209  *			    processing a data_vio.
1210  * @completion: The data_vio holding the lock.
1211  */
1212 static void release_logical_lock(struct vdo_completion *completion)
1213 {
1214 	struct data_vio *data_vio = as_data_vio(completion);
1215 	struct lbn_lock *lock = &data_vio->logical;
1216 
1217 	assert_data_vio_in_logical_zone(data_vio);
1218 
1219 	if (vdo_waitq_has_waiters(&lock->waiters))
1220 		transfer_lock(data_vio, lock);
1221 	else
1222 		release_lock(data_vio, lock);
1223 
1224 	vdo_release_flush_generation_lock(data_vio);
1225 	perform_cleanup_stage(data_vio, VIO_CLEANUP_DONE);
1226 }
1227 
1228 /** clean_hash_lock() - Release the hash lock at the end of processing a data_vio. */
1229 static void clean_hash_lock(struct vdo_completion *completion)
1230 {
1231 	struct data_vio *data_vio = as_data_vio(completion);
1232 
1233 	assert_data_vio_in_hash_zone(data_vio);
1234 	if (completion->result != VDO_SUCCESS) {
1235 		vdo_clean_failed_hash_lock(data_vio);
1236 		return;
1237 	}
1238 
1239 	vdo_release_hash_lock(data_vio);
1240 	perform_cleanup_stage(data_vio, VIO_RELEASE_LOGICAL);
1241 }
1242 
1243 /**
1244  * finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up.
1245  * @data_vio: The data_vio.
1246  *
1247  * If the data_vio is part of a multi-block discard, it is relaunched on the next block; otherwise, it is returned to the
1248  * pool.
1249  */
1250 static void finish_cleanup(struct data_vio *data_vio)
1251 {
1252 	struct vdo_completion *completion = &data_vio->vio.completion;
1253 	u32 discard_size = min_t(u32, data_vio->remaining_discard,
1254 				 VDO_BLOCK_SIZE - data_vio->offset);
1255 
1256 	VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
1257 			    "complete data_vio has no allocation lock");
1258 	VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
1259 			    "complete data_vio has no hash lock");
1260 	if ((data_vio->remaining_discard <= discard_size) ||
1261 	    (completion->result != VDO_SUCCESS)) {
1262 		struct data_vio_pool *pool = completion->vdo->data_vio_pool;
1263 
1264 		vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
1265 		schedule_releases(pool);
1266 		return;
1267 	}
1268 
1269 	data_vio->remaining_discard -= discard_size;
1270 	data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
1271 	data_vio->read = data_vio->is_partial;
1272 	data_vio->offset = 0;
1273 	completion->requeue = true;
1274 	data_vio->first_reference_operation_complete = false;
1275 	launch_data_vio(data_vio, data_vio->logical.lbn + 1);
1276 }
1277 
1278 /** perform_cleanup_stage() - Perform the next step in the process of cleaning up a data_vio. */
1279 static void perform_cleanup_stage(struct data_vio *data_vio,
1280 				  enum data_vio_cleanup_stage stage)
1281 {
1282 	struct vdo *vdo = vdo_from_data_vio(data_vio);
1283 
1284 	switch (stage) {
1285 	case VIO_RELEASE_HASH_LOCK:
1286 		if (data_vio->hash_lock != NULL) {
1287 			launch_data_vio_hash_zone_callback(data_vio, clean_hash_lock);
1288 			return;
1289 		}
1290 		fallthrough;
1291 
1292 	case VIO_RELEASE_ALLOCATED:
1293 		if (data_vio_has_allocation(data_vio)) {
1294 			launch_data_vio_allocated_zone_callback(data_vio,
1295 								release_allocated_lock);
1296 			return;
1297 		}
1298 		fallthrough;
1299 
1300 	case VIO_RELEASE_RECOVERY_LOCKS:
1301 		if ((data_vio->recovery_sequence_number > 0) &&
1302 		    (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) &&
1303 		    (data_vio->vio.completion.result != VDO_READ_ONLY))
1304 			vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock");
1305 		fallthrough;
1306 
1307 	case VIO_RELEASE_LOGICAL:
1308 		launch_data_vio_logical_callback(data_vio, release_logical_lock);
1309 		return;
1310 
1311 	default:
1312 		finish_cleanup(data_vio);
1313 	}
1314 }
1315 
1316 void complete_data_vio(struct vdo_completion *completion)
1317 {
1318 	struct data_vio *data_vio = as_data_vio(completion);
1319 
1320 	completion->error_handler = NULL;
1321 	data_vio->last_async_operation = VIO_ASYNC_OP_CLEANUP;
1322 	perform_cleanup_stage(data_vio,
1323 			      (data_vio->write ? VIO_CLEANUP_START : VIO_RELEASE_LOGICAL));
1324 }
1325 
1326 static void enter_read_only_mode(struct vdo_completion *completion)
1327 {
1328 	if (vdo_is_read_only(completion->vdo))
1329 		return;
1330 
1331 	if (completion->result != VDO_READ_ONLY) {
1332 		struct data_vio *data_vio = as_data_vio(completion);
1333 
1334 		vdo_log_error_strerror(completion->result,
1335 				       "Preparing to enter read-only mode: data_vio for LBN %llu (becoming mapped to %llu, previously mapped to %llu, allocated %llu) is completing with a fatal error after operation %s",
1336 				       (unsigned long long) data_vio->logical.lbn,
1337 				       (unsigned long long) data_vio->new_mapped.pbn,
1338 				       (unsigned long long) data_vio->mapped.pbn,
1339 				       (unsigned long long) data_vio->allocation.pbn,
1340 				       get_data_vio_operation_name(data_vio));
1341 	}
1342 
1343 	vdo_enter_read_only_mode(completion->vdo, completion->result);
1344 }
1345 
1346 void handle_data_vio_error(struct vdo_completion *completion)
1347 {
1348 	struct data_vio *data_vio = as_data_vio(completion);
1349 
1350 	if ((completion->result == VDO_READ_ONLY) || (data_vio->user_bio == NULL))
1351 		enter_read_only_mode(completion);
1352 
1353 	update_data_vio_error_stats(data_vio);
1354 	complete_data_vio(completion);
1355 }
1356 
1357 /**
1358  * get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a
1359  *				   data_vio.
1360  * @data_vio: The data_vio.
1361  */
1362 const char *get_data_vio_operation_name(struct data_vio *data_vio)
1363 {
1364 	BUILD_BUG_ON((MAX_VIO_ASYNC_OPERATION_NUMBER - MIN_VIO_ASYNC_OPERATION_NUMBER) !=
1365 		     ARRAY_SIZE(ASYNC_OPERATION_NAMES));
1366 
1367 	return ((data_vio->last_async_operation < MAX_VIO_ASYNC_OPERATION_NUMBER) ?
1368 		ASYNC_OPERATION_NAMES[data_vio->last_async_operation] :
1369 		"unknown async operation");
1370 }
1371 
1372 /**
1373  * data_vio_allocate_data_block() - Allocate a data block.
1374  * @data_vio: The data_vio.
1375  * @write_lock_type: The type of write lock to obtain on the block.
1376  * @callback: The callback which will attempt an allocation in the current zone and continue if it
1377  *	      succeeds.
1378  * @error_handler: The handler for errors while allocating.
1379  */
1380 void data_vio_allocate_data_block(struct data_vio *data_vio,
1381 				  enum pbn_lock_type write_lock_type,
1382 				  vdo_action_fn callback, vdo_action_fn error_handler)
1383 {
1384 	struct allocation *allocation = &data_vio->allocation;
1385 
1386 	VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
1387 			    "data_vio does not have an allocation");
1388 	allocation->write_lock_type = write_lock_type;
1389 	allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
1390 	allocation->first_allocation_zone = allocation->zone->zone_number;
1391 
1392 	data_vio->vio.completion.error_handler = error_handler;
1393 	launch_data_vio_allocated_zone_callback(data_vio, callback);
1394 }
1395 
1396 /**
1397  * release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block.
1398  * @data_vio: The data_vio.
1399  * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten).
1400  *
1401  * If the reference to the locked block is still provisional, it will be released as well.
1402  */
1403 void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset)
1404 {
1405 	struct allocation *allocation = &data_vio->allocation;
1406 	physical_block_number_t locked_pbn = allocation->pbn;
1407 
1408 	assert_data_vio_in_allocated_zone(data_vio);
1409 
1410 	if (reset || vdo_pbn_lock_has_provisional_reference(allocation->lock))
1411 		allocation->pbn = VDO_ZERO_BLOCK;
1412 
1413 	vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn,
1414 					   vdo_forget(allocation->lock));
1415 }
1416 
1417 /**
1418  * uncompress_data_vio() - Uncompress the data a data_vio has just read.
1419  * @data_vio: The data_vio.
1420  * @mapping_state: The mapping state indicating which fragment to decompress.
1421  * @buffer: The buffer to receive the uncompressed data.
1422  */
1423 int uncompress_data_vio(struct data_vio *data_vio,
1424 			enum block_mapping_state mapping_state, char *buffer)
1425 {
1426 	int size;
1427 	u16 fragment_offset, fragment_size;
1428 	struct compressed_block *block = data_vio->compression.block;
1429 	int result = vdo_get_compressed_block_fragment(mapping_state, block,
1430 						       &fragment_offset, &fragment_size);
1431 
1432 	if (result != VDO_SUCCESS) {
1433 		vdo_log_debug("%s: compressed fragment error %d", __func__, result);
1434 		return result;
1435 	}
1436 
1437 	size = LZ4_decompress_safe((block->data + fragment_offset), buffer,
1438 				   fragment_size, VDO_BLOCK_SIZE);
1439 	if (size != VDO_BLOCK_SIZE) {
1440 		vdo_log_debug("%s: lz4 error", __func__);
1441 		return VDO_INVALID_FRAGMENT;
1442 	}
1443 
1444 	return VDO_SUCCESS;
1445 }
1446 
1447 /**
1448  * modify_for_partial_write() - Do the modify-write part of a read-modify-write cycle.
1449  * @completion: The data_vio which has just finished its read.
1450  *
1451  * This is called from complete_read() and complete_zero_read().
1452  */
1453 static void modify_for_partial_write(struct vdo_completion *completion)
1454 {
1455 	struct data_vio *data_vio = as_data_vio(completion);
1456 	char *data = data_vio->vio.data;
1457 	struct bio *bio = data_vio->user_bio;
1458 
1459 	assert_data_vio_on_cpu_thread(data_vio);
1460 
1461 	if (bio_op(bio) == REQ_OP_DISCARD) {
1462 		memset(data + data_vio->offset, '\0', min_t(u32,
1463 							    data_vio->remaining_discard,
1464 							    VDO_BLOCK_SIZE - data_vio->offset));
1465 	} else {
1466 		copy_from_bio(bio, data + data_vio->offset);
1467 	}
1468 
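	/*
	 * The merged block may now be entirely zero, in which case the write path will map it
	 * to the zero block instead of allocating storage for it.
	 */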
1469 	data_vio->is_zero = mem_is_zero(data, VDO_BLOCK_SIZE);
1470 	data_vio->read = false;
1471 	launch_data_vio_logical_callback(data_vio,
1472 					 continue_data_vio_with_block_map_slot);
1473 }
1474 
1475 static void complete_read(struct vdo_completion *completion)
1476 {
1477 	struct data_vio *data_vio = as_data_vio(completion);
1478 	char *data = data_vio->vio.data;
1479 	bool compressed = vdo_is_state_compressed(data_vio->mapped.state);
1480 
1481 	assert_data_vio_on_cpu_thread(data_vio);
1482 
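	/*
	 * For a compressed mapping, the bio read the entire compressed block into the
	 * compression buffer; unpack the mapped fragment into the data buffer before it can be
	 * returned or modified.
	 */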
1483 	if (compressed) {
1484 		int result = uncompress_data_vio(data_vio, data_vio->mapped.state, data);
1485 
1486 		if (result != VDO_SUCCESS) {
1487 			continue_data_vio_with_error(data_vio, result);
1488 			return;
1489 		}
1490 	}
1491 
1492 	if (data_vio->write) {
1493 		modify_for_partial_write(completion);
1494 		return;
1495 	}
1496 
1497 	if (compressed || data_vio->is_partial)
1498 		copy_to_bio(data_vio->user_bio, data + data_vio->offset);
1499 
1500 	acknowledge_data_vio(data_vio);
1501 	complete_data_vio(completion);
1502 }
1503 
1504 static void read_endio(struct bio *bio)
1505 {
1506 	struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
1507 	int result = blk_status_to_errno(bio->bi_status);
1508 
1509 	vdo_count_completed_bios(bio);
1510 	if (result != VDO_SUCCESS) {
1511 		continue_data_vio_with_error(data_vio, result);
1512 		return;
1513 	}
1514 
1515 	launch_data_vio_cpu_callback(data_vio, complete_read,
1516 				     CPU_Q_COMPLETE_READ_PRIORITY);
1517 }
1518 
1519 static void complete_zero_read(struct vdo_completion *completion)
1520 {
1521 	struct data_vio *data_vio = as_data_vio(completion);
1522 
1523 	assert_data_vio_on_cpu_thread(data_vio);
1524 
1525 	if (data_vio->is_partial) {
1526 		memset(data_vio->vio.data, 0, VDO_BLOCK_SIZE);
1527 		if (data_vio->write) {
1528 			modify_for_partial_write(completion);
1529 			return;
1530 		}
1531 	} else {
1532 		zero_fill_bio(data_vio->user_bio);
1533 	}
1534 
1535 	complete_read(completion);
1536 }
1537 
1538 /**
1539  * read_block() - Read a block asynchronously.
1540  * @completion: The data_vio doing the read.
1541  *
1542  * This is the callback registered in continue_data_vio_with_block_map_slot().
1543  */
1544 static void read_block(struct vdo_completion *completion)
1545 {
1546 	struct data_vio *data_vio = as_data_vio(completion);
1547 	struct vio *vio = as_vio(completion);
1548 	int result = VDO_SUCCESS;
1549 
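	/* A mapping to the zero block needs no storage read; fill in zeros on a CPU thread. */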
1550 	if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
1551 		launch_data_vio_cpu_callback(data_vio, complete_zero_read,
1552 					     CPU_Q_COMPLETE_VIO_PRIORITY);
1553 		return;
1554 	}
1555 
1556 	data_vio->last_async_operation = VIO_ASYNC_OP_READ_DATA_VIO;
1557 	if (vdo_is_state_compressed(data_vio->mapped.state)) {
1558 		result = vio_reset_bio(vio, (char *) data_vio->compression.block,
1559 				       read_endio, REQ_OP_READ, data_vio->mapped.pbn);
1560 	} else {
1561 		blk_opf_t opf = ((data_vio->user_bio->bi_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ);
1562 
1563 		if (data_vio->is_partial) {
1564 			result = vio_reset_bio(vio, vio->data, read_endio, opf,
1565 					       data_vio->mapped.pbn);
1566 		} else {
1567 			/* A full 4k read. Use the incoming bio to avoid having to copy the data */
1568 			bio_reset(vio->bio, vio->bio->bi_bdev, opf);
1569 			bio_init_clone(data_vio->user_bio->bi_bdev, vio->bio,
1570 				       data_vio->user_bio, GFP_KERNEL);
1571 
1572 			/* Copy over the original bio iovec and opflags. */
1573 			vdo_set_bio_properties(vio->bio, vio, read_endio, opf,
1574 					       data_vio->mapped.pbn);
1575 		}
1576 	}
1577 
1578 	if (result != VDO_SUCCESS) {
1579 		continue_data_vio_with_error(data_vio, result);
1580 		return;
1581 	}
1582 
1583 	vdo_submit_data_vio(data_vio);
1584 }
1585 
1586 static inline struct data_vio *
1587 reference_count_update_completion_as_data_vio(struct vdo_completion *completion)
1588 {
1589 	if (completion->type == VIO_COMPLETION)
1590 		return as_data_vio(completion);
1591 
1592 	return container_of(completion, struct data_vio, decrement_completion);
1593 }
1594 
1595 /**
1596  * update_block_map() - Rendezvous of the data_vio and decrement completions after each has
1597  *                      made its reference updates. Handle any error from either, or proceed
1598  *                      to updating the block map.
1599  * @completion: The completion of the write in progress.
1600  */
1601 static void update_block_map(struct vdo_completion *completion)
1602 {
1603 	struct data_vio *data_vio = reference_count_update_completion_as_data_vio(completion);
1604 
1605 	assert_data_vio_in_logical_zone(data_vio);
1606 
1607 	if (!data_vio->first_reference_operation_complete) {
1608 		/* Rendezvous, we're first */
1609 		data_vio->first_reference_operation_complete = true;
1610 		return;
1611 	}
1612 
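	/*
	 * Both reference count updates are done. Continue on the data_vio's own completion,
	 * folding in any error recorded by the decrement completion.
	 */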
1613 	completion = &data_vio->vio.completion;
1614 	vdo_set_completion_result(completion, data_vio->decrement_completion.result);
1615 	if (completion->result != VDO_SUCCESS) {
1616 		handle_data_vio_error(completion);
1617 		return;
1618 	}
1619 
1620 	completion->error_handler = handle_data_vio_error;
1621 	if (data_vio->hash_lock != NULL)
1622 		set_data_vio_hash_zone_callback(data_vio, vdo_continue_hash_lock);
1623 	else
1624 		completion->callback = complete_data_vio;
1625 
1626 	data_vio->last_async_operation = VIO_ASYNC_OP_PUT_MAPPED_BLOCK;
1627 	vdo_put_mapped_block(data_vio);
1628 }
1629 
1630 static void decrement_reference_count(struct vdo_completion *completion)
1631 {
1632 	struct data_vio *data_vio = container_of(completion, struct data_vio,
1633 						 decrement_completion);
1634 
1635 	assert_data_vio_in_mapped_zone(data_vio);
1636 
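	/* Success and error both continue at update_block_map() so the rendezvous always completes. */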
1637 	vdo_set_completion_callback(completion, update_block_map,
1638 				    data_vio->logical.zone->thread_id);
1639 	completion->error_handler = update_block_map;
1640 	vdo_modify_reference_count(completion, &data_vio->decrement_updater);
1641 }
1642 
1643 static void increment_reference_count(struct vdo_completion *completion)
1644 {
1645 	struct data_vio *data_vio = as_data_vio(completion);
1646 
1647 	assert_data_vio_in_new_mapped_zone(data_vio);
1648 
1649 	if (data_vio->downgrade_allocation_lock) {
1650 		/*
1651 		 * Now that the data has been written, it's safe to deduplicate against the
1652 		 * block. Downgrade the allocation lock to a read lock so it can be used later by
1653 		 * the hash lock. This is done here since it needs to happen sometime before we
1654 		 * return to the hash zone, and we are currently on the correct thread. For
1655 		 * compressed blocks, the downgrade will have already been done.
1656 		 */
1657 		vdo_downgrade_pbn_write_lock(data_vio->allocation.lock, false);
1658 	}
1659 
1660 	set_data_vio_logical_callback(data_vio, update_block_map);
1661 	completion->error_handler = update_block_map;
1662 	vdo_modify_reference_count(completion, &data_vio->increment_updater);
1663 }
1664 
1665 /** journal_remapping() - Add a recovery journal entry for a data remapping. */
1666 static void journal_remapping(struct vdo_completion *completion)
1667 {
1668 	struct data_vio *data_vio = as_data_vio(completion);
1669 
1670 	assert_data_vio_in_journal_zone(data_vio);
1671 
1672 	data_vio->decrement_updater.operation = VDO_JOURNAL_DATA_REMAPPING;
1673 	data_vio->decrement_updater.zpbn = data_vio->mapped;
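	/*
	 * A new mapping to the zero block needs no increment, and an old mapping to the zero
	 * block needs no decrement. Mark the corresponding half of the rendezvous as already
	 * complete for whichever side is skipped.
	 */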
1674 	if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
1675 		data_vio->first_reference_operation_complete = true;
1676 		if (data_vio->mapped.pbn == VDO_ZERO_BLOCK)
1677 			set_data_vio_logical_callback(data_vio, update_block_map);
1678 	} else {
1679 		set_data_vio_new_mapped_zone_callback(data_vio,
1680 						      increment_reference_count);
1681 	}
1682 
1683 	if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
1684 		data_vio->first_reference_operation_complete = true;
1685 	} else {
1686 		vdo_set_completion_callback(&data_vio->decrement_completion,
1687 					    decrement_reference_count,
1688 					    data_vio->mapped.zone->thread_id);
1689 	}
1690 
1691 	data_vio->last_async_operation = VIO_ASYNC_OP_JOURNAL_REMAPPING;
1692 	vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
1693 }
1694 
1695 /**
1696  * read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write.
1697  * @completion: The data_vio doing the read.
1698  *
1699  * Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate
1700  * journal entry referencing the removal of this LBN->PBN mapping.
1701  */
1702 static void read_old_block_mapping(struct vdo_completion *completion)
1703 {
1704 	struct data_vio *data_vio = as_data_vio(completion);
1705 
1706 	assert_data_vio_in_logical_zone(data_vio);
1707 
1708 	data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE;
1709 	set_data_vio_journal_callback(data_vio, journal_remapping);
1710 	vdo_get_mapped_block(data_vio);
1711 }
1712 
1713 void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lock *lock)
1714 {
1715 	data_vio->increment_updater = (struct reference_updater) {
1716 		.operation = VDO_JOURNAL_DATA_REMAPPING,
1717 		.increment = true,
1718 		.zpbn = data_vio->new_mapped,
1719 		.lock = lock,
1720 	};
1721 
1722 	launch_data_vio_logical_callback(data_vio, read_old_block_mapping);
1723 }
1724 
1725 /**
1726  * pack_compressed_data() - Attempt to pack the compressed data_vio into a block.
1727  * @completion: The data_vio.
1728  *
1729  * This is the callback registered in compress_data_vio().
1730  */
1731 static void pack_compressed_data(struct vdo_completion *completion)
1732 {
1733 	struct data_vio *data_vio = as_data_vio(completion);
1734 
1735 	assert_data_vio_in_packer_zone(data_vio);
1736 
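	/*
	 * Compression may have been disabled, or this data_vio may have become ineligible to
	 * compress; fall back to writing the uncompressed block.
	 */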
1737 	if (!vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
1738 	    get_data_vio_compression_status(data_vio).may_not_compress) {
1739 		write_data_vio(data_vio);
1740 		return;
1741 	}
1742 
1743 	data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_PACKING;
1744 	vdo_attempt_packing(data_vio);
1745 }
1746 
1747 /**
1748  * compress_data_vio() - Do the actual work of compressing the data on a CPU queue.
1749  * @completion: The data_vio.
1750  *
1751  * This callback is registered in launch_compress_data_vio().
1752  */
1753 static void compress_data_vio(struct vdo_completion *completion)
1754 {
1755 	struct data_vio *data_vio = as_data_vio(completion);
1756 	int size;
1757 
1758 	assert_data_vio_on_cpu_thread(data_vio);
1759 
1760 	/*
1761 	 * By putting the compressed data at the start of the compressed block data field, we won't
1762 	 * need to copy it if this data_vio becomes a compressed write agent.
1763 	 */
1764 	size = LZ4_compress_default(data_vio->vio.data,
1765 				    data_vio->compression.block->data, VDO_BLOCK_SIZE,
1766 				    VDO_MAX_COMPRESSED_FRAGMENT_SIZE,
1767 				    (char *) vdo_get_work_queue_private_data());
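	/*
	 * LZ4_compress_default() returns 0 on failure; only a result which fits within a
	 * compressed block's data area is sent to the packer.
	 */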
1768 	if ((size > 0) && (size < VDO_COMPRESSED_BLOCK_DATA_SIZE)) {
1769 		data_vio->compression.size = size;
1770 		launch_data_vio_packer_callback(data_vio, pack_compressed_data);
1771 		return;
1772 	}
1773 
1774 	write_data_vio(data_vio);
1775 }
1776 
1777 /**
1778  * launch_compress_data_vio() - Continue a write by attempting to compress the data.
1779  * @data_vio: The data_vio.
1780  *
1781  * This is a re-entry point to vio_write used by hash locks.
1782  */
1783 void launch_compress_data_vio(struct data_vio *data_vio)
1784 {
1785 	VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
1786 	VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
1787 			    "data_vio to compress has a hash_lock");
1788 	VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
1789 			    "data_vio to compress has an allocation");
1790 
1791 	/*
1792 	 * There are 4 reasons why a data_vio which has reached this point will not be eligible for
1793 	 * compression:
1794 	 *
1795 	 * 1) Since data_vios can block indefinitely in the packer, it would be bad to do so if the
1796 	 * write request also requests FUA.
1797 	 *
1798 	 * 2) A data_vio should not be compressed when compression is disabled for the vdo.
1799 	 *
1800 	 * 3) A data_vio could be doing a partial write on behalf of a larger discard which has not
1801 	 * yet been acknowledged and hence blocking in the packer would be bad.
1802 	 *
1803 	 * 4) Some other data_vio may be waiting on this data_vio in which case blocking in the
1804 	 * packer would also be bad.
1805 	 */
1806 	if (data_vio->fua ||
1807 	    !vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
1808 	    ((data_vio->user_bio != NULL) && (bio_op(data_vio->user_bio) == REQ_OP_DISCARD)) ||
1809 	    (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_COMPRESSING)) {
1810 		write_data_vio(data_vio);
1811 		return;
1812 	}
1813 
1814 	data_vio->last_async_operation = VIO_ASYNC_OP_COMPRESS_DATA_VIO;
1815 	launch_data_vio_cpu_callback(data_vio, compress_data_vio,
1816 				     CPU_Q_COMPRESS_BLOCK_PRIORITY);
1817 }
1818 
1819 /**
1820  * hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record
1821  *		     name as set).
1822  * @completion: The data_vio.
1823  *
1824  * This callback is registered in prepare_for_dedupe().
1825  */
1826 static void hash_data_vio(struct vdo_completion *completion)
1827 {
1828 	struct data_vio *data_vio = as_data_vio(completion);
1829 
1830 	assert_data_vio_on_cpu_thread(data_vio);
1831 	VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
1832 
1833 	murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be,
1834 			&data_vio->record_name);
1835 
1836 	data_vio->hash_zone = vdo_select_hash_zone(vdo_from_data_vio(data_vio)->hash_zones,
1837 						   &data_vio->record_name);
1838 	data_vio->last_async_operation = VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK;
1839 	launch_data_vio_hash_zone_callback(data_vio, vdo_acquire_hash_lock);
1840 }
1841 
1842 /** prepare_for_dedupe() - Prepare for the dedupe path after attempting to get an allocation. */
1843 static void prepare_for_dedupe(struct data_vio *data_vio)
1844 {
1845 	/* We don't care what thread we are on. */
1846 	VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
1847 
1848 	/*
1849 	 * Before we can dedupe, we need to know the record name, so the first
1850 	 * step is to hash the block data.
1851 	 */
1852 	data_vio->last_async_operation = VIO_ASYNC_OP_HASH_DATA_VIO;
1853 	launch_data_vio_cpu_callback(data_vio, hash_data_vio, CPU_Q_HASH_BLOCK_PRIORITY);
1854 }
1855 
1856 /**
1857  * write_bio_finished() - This is the bio_end_io function registered in write_data_vio() to be called
1858  *			  when a data_vio's write to the underlying storage has completed.
1859  * @bio: The completed bio.
1860  */
1861 static void write_bio_finished(struct bio *bio)
1862 {
1863 	struct data_vio *data_vio = vio_as_data_vio((struct vio *) bio->bi_private);
1864 
1865 	vdo_count_completed_bios(bio);
1866 	vdo_set_completion_result(&data_vio->vio.completion,
1867 				  blk_status_to_errno(bio->bi_status));
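	/*
	 * The data has been written to the underlying storage, so the allocation write lock can
	 * be downgraded for deduplication once the reference count is incremented.
	 */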
1868 	data_vio->downgrade_allocation_lock = true;
1869 	update_metadata_for_data_vio_write(data_vio, data_vio->allocation.lock);
1870 }
1871 
1872 /** write_data_vio() - Write a data block to storage without compression. */
1873 void write_data_vio(struct data_vio *data_vio)
1874 {
1875 	struct data_vio_compression_status status, new_status;
1876 	int result;
1877 
1878 	if (!data_vio_has_allocation(data_vio)) {
1879 		/*
1880 		 * There was no space to write this block and we failed to deduplicate or compress
1881 		 * it.
1882 		 */
1883 		continue_data_vio_with_error(data_vio, VDO_NO_SPACE);
1884 		return;
1885 	}
1886 
1887 	new_status = (struct data_vio_compression_status) {
1888 		.stage = DATA_VIO_POST_PACKER,
1889 		.may_not_compress = true,
1890 	};
1891 
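	/*
	 * Use a compare-and-swap loop to mark this data_vio as past the packer and ineligible
	 * for compression, unless it has already reached the post-packer stage.
	 */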
1892 	do {
1893 		status = get_data_vio_compression_status(data_vio);
1894 	} while ((status.stage != DATA_VIO_POST_PACKER) &&
1895 		 !set_data_vio_compression_status(data_vio, status, new_status));
1896 
1897 	/* Write the data from the data block buffer. */
1898 	result = vio_reset_bio(&data_vio->vio, data_vio->vio.data,
1899 			       write_bio_finished, REQ_OP_WRITE,
1900 			       data_vio->allocation.pbn);
1901 	if (result != VDO_SUCCESS) {
1902 		continue_data_vio_with_error(data_vio, result);
1903 		return;
1904 	}
1905 
1906 	data_vio->last_async_operation = VIO_ASYNC_OP_WRITE_DATA_VIO;
1907 	vdo_submit_data_vio(data_vio);
1908 }
1909 
1910 /**
1911  * acknowledge_write_callback() - Acknowledge a write to the requestor.
1912  * @completion: The data_vio.
1913  *
1914  * This callback is registered in allocate_block() and continue_data_vio_with_block_map_slot().
1915  */
1916 static void acknowledge_write_callback(struct vdo_completion *completion)
1917 {
1918 	struct data_vio *data_vio = as_data_vio(completion);
1919 	struct vdo *vdo = completion->vdo;
1920 
1921 	VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
1922 			     (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
1923 			    "%s() called on bio ack queue", __func__);
1924 	VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
1925 			    "write VIO to be acknowledged has a flush generation lock");
1926 	acknowledge_data_vio(data_vio);
1927 	if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
1928 		/* This is a zero write or discard */
1929 		update_metadata_for_data_vio_write(data_vio, NULL);
1930 		return;
1931 	}
1932 
1933 	prepare_for_dedupe(data_vio);
1934 }
1935 
1936 /**
1937  * allocate_block() - Attempt to allocate a block in the current allocation zone.
1938  * @completion: The data_vio.
1939  *
1940  * This callback is registered in continue_data_vio_with_block_map_slot().
1941  */
1942 static void allocate_block(struct vdo_completion *completion)
1943 {
1944 	struct data_vio *data_vio = as_data_vio(completion);
1945 
1946 	assert_data_vio_in_allocated_zone(data_vio);
1947 
1948 	if (!vdo_allocate_block_in_zone(data_vio))
1949 		return;
1950 
1951 	completion->error_handler = handle_data_vio_error;
1952 	WRITE_ONCE(data_vio->allocation_succeeded, true);
1953 	data_vio->new_mapped = (struct zoned_pbn) {
1954 		.zone = data_vio->allocation.zone,
1955 		.pbn = data_vio->allocation.pbn,
1956 		.state = VDO_MAPPING_STATE_UNCOMPRESSED,
1957 	};
1958 
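	/*
	 * A FUA write, or a block which is not the last of a larger discard, can't be
	 * acknowledged yet, so skip the early acknowledgment and go straight to dedupe.
	 */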
1959 	if (data_vio->fua ||
1960 	    data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
1961 		prepare_for_dedupe(data_vio);
1962 		return;
1963 	}
1964 
1965 	data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
1966 	launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
1967 }
1968 
1969 /**
1970  * handle_allocation_error() - Handle an error attempting to allocate a block.
1971  * @completion: The data_vio.
1972  *
1973  * This error handler is registered in continue_data_vio_with_block_map_slot().
1974  */
1975 static void handle_allocation_error(struct vdo_completion *completion)
1976 {
1977 	struct data_vio *data_vio = as_data_vio(completion);
1978 
1979 	if (completion->result == VDO_NO_SPACE) {
1980 		/* We failed to get an allocation, but we can try to dedupe. */
1981 		vdo_reset_completion(completion);
1982 		completion->error_handler = handle_data_vio_error;
1983 		prepare_for_dedupe(data_vio);
1984 		return;
1985 	}
1986 
1987 	/* We got a "real" error, not just a failure to allocate, so fail the request. */
1988 	handle_data_vio_error(completion);
1989 }
1990 
1991 static int assert_is_discard(struct data_vio *data_vio)
1992 {
1993 	int result = VDO_ASSERT(data_vio->is_discard,
1994 				"data_vio with no block map page is a discard");
1995 
1996 	return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY);
1997 }
1998 
1999 /**
2000  * continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map.
2001  * @completion: The data_vio to continue.
2002  *
2003  * This callback is registered in launch_read_data_vio().
2004  */
2005 void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
2006 {
2007 	struct data_vio *data_vio = as_data_vio(completion);
2008 
2009 	assert_data_vio_in_logical_zone(data_vio);
2010 	if (data_vio->read) {
2011 		set_data_vio_logical_callback(data_vio, read_block);
2012 		data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ;
2013 		vdo_get_mapped_block(data_vio);
2014 		return;
2015 	}
2016 
2017 	vdo_acquire_flush_generation_lock(data_vio);
2018 
2019 	if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) {
2020 		/*
2021 		 * This is a discard for a block on a block map page which has not been allocated, so
2022 		 * there's nothing more we need to do.
2023 		 */
2024 		completion->callback = complete_data_vio;
2025 		continue_data_vio_with_error(data_vio, assert_is_discard(data_vio));
2026 		return;
2027 	}
2028 
2029 	/*
2030 	 * We need an allocation if this is neither a full-block discard nor a
2031 	 * full-block zero write.
2032 	 */
2033 	if (!data_vio->is_zero && (!data_vio->is_discard || data_vio->is_partial)) {
2034 		data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, allocate_block,
2035 					     handle_allocation_error);
2036 		return;
2037 	}
2038 
2039 	/*
2040 	 * We don't need to write any data, so skip allocation and just update the block map and
2041 	 * reference counts (via the journal).
2042 	 */
2043 	data_vio->new_mapped.pbn = VDO_ZERO_BLOCK;
2044 	if (data_vio->is_zero)
2045 		data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
2046 
2047 	if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
2048 		/* This is not the final block of a discard so we can't acknowledge it yet. */
2049 		update_metadata_for_data_vio_write(data_vio, NULL);
2050 		return;
2051 	}
2052 
2053 	data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
2054 	launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
2055 }
2056