/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */
5 
6 #ifndef VDO_H
7 #define VDO_H
8 
9 #include <linux/atomic.h>
10 #include <linux/blk_types.h>
11 #include <linux/completion.h>
12 #include <linux/dm-kcopyd.h>
13 #include <linux/list.h>
14 #include <linux/spinlock.h>
15 
16 #include "admin-state.h"
17 #include "encodings.h"
18 #include "funnel-workqueue.h"
19 #include "packer.h"
20 #include "physical-zone.h"
21 #include "statistics.h"
22 #include "thread-registry.h"
23 #include "types.h"
24 
/*
 * The lifecycle state of a read-only notifier: whether read-only
 * notifications may be issued, are currently being issued, or have finished.
 */
enum notifier_state {
	/* Notifications are allowed but not in progress */
	MAY_NOTIFY,
	/* A notification is in progress */
	NOTIFYING,
	/* Notifications are not allowed */
	MAY_NOT_NOTIFY,
	/* A notification has completed */
	NOTIFIED,
};
35 
/**
 * typedef vdo_read_only_notification_fn - A function to notify a listener that the VDO has gone
 *                                         read-only.
 * @listener: The object to notify.
 * @parent: The completion to notify in order to acknowledge the notification.
 */
typedef void (*vdo_read_only_notification_fn)(void *listener, struct vdo_completion *parent);
43 
/*
 * An object to be notified when the VDO enters read-only mode. Listeners are
 * kept in a singly-linked list, one list per vdo thread (see the listeners
 * field of struct vdo_thread).
 */
struct read_only_listener {
	/* The listener object which will be passed to the notification function */
	void *listener;
	/* The method to call to notify the listener */
	vdo_read_only_notification_fn notify;
	/* A pointer to the next listener in this thread's list */
	struct read_only_listener *next;
};
55 
/* Per-thread state for each of a vdo's base threads. */
struct vdo_thread {
	/* The vdo to which this thread belongs */
	struct vdo *vdo;
	/* This thread's identifier */
	thread_id_t thread_id;
	/* The work queue which executes work on this thread */
	struct vdo_work_queue *queue;
	/*
	 * Each thread maintains its own notion of whether the VDO is read-only so that the
	 * read-only state can be checked from any base thread without worrying about
	 * synchronization or thread safety. This does mean that knowledge of the VDO going
	 * read-only does not occur simultaneously across the VDO's threads, but that does not seem
	 * to cause any problems.
	 */
	bool is_read_only;
	/*
	 * A list of objects waiting to be notified on this thread that the VDO has entered
	 * read-only mode.
	 */
	struct read_only_listener *listeners;
	/* This thread's entry in the thread registry (see thread-registry.h) */
	struct registered_thread allocating_thread;
};
75 
/* Per-category bio counts, kept atomically since they are updated from many threads */
struct atomic_bio_stats {
	atomic64_t read; /* Number of not REQ_WRITE bios */
	atomic64_t write; /* Number of REQ_WRITE bios */
	atomic64_t discard; /* Number of REQ_DISCARD bios */
	atomic64_t flush; /* Number of REQ_FLUSH bios */
	atomic64_t empty_flush; /* Number of REQ_PREFLUSH bios without data */
	atomic64_t fua; /* Number of REQ_FUA bios */
};
85 
/* Counters are atomic since updates can arrive concurrently from arbitrary threads. */
struct atomic_statistics {
	/* Bios submitted to and completed by the vdo */
	atomic64_t bios_submitted;
	atomic64_t bios_completed;
	/* Number of flushes issued downstream */
	atomic64_t flush_out;
	/* Error and anomaly counts */
	atomic64_t invalid_advice_pbn_count;
	atomic64_t no_space_error_count;
	atomic64_t read_only_error_count;
	/* Per-category bio statistics (see struct atomic_bio_stats above) */
	struct atomic_bio_stats bios_in;
	struct atomic_bio_stats bios_in_partial;
	struct atomic_bio_stats bios_out;
	struct atomic_bio_stats bios_out_completed;
	struct atomic_bio_stats bios_acknowledged;
	struct atomic_bio_stats bios_acknowledged_partial;
	struct atomic_bio_stats bios_meta;
	struct atomic_bio_stats bios_meta_completed;
	struct atomic_bio_stats bios_journal;
	struct atomic_bio_stats bios_journal_completed;
	struct atomic_bio_stats bios_page_cache;
	struct atomic_bio_stats bios_page_cache_completed;
};
107 
/*
 * Tracks the process of entering read-only mode and notifying the registered
 * read-only listeners (the state values are described by enum notifier_state).
 */
struct read_only_notifier {
	/* The completion for entering read-only mode */
	struct vdo_completion completion;
	/* A completion waiting for notifications to be drained or enabled */
	struct vdo_completion *waiter;
	/* Lock to protect the next two fields */
	spinlock_t lock;
	/* The code of the error which put the VDO into read-only mode */
	int read_only_error;
	/* The current state of the notifier (values described above) */
	enum notifier_state state;
};
120 
/*
 * The thread ID returned when the current thread is not a vdo thread, or can not be determined
 * (usually due to being at interrupt context).
 */
#define VDO_INVALID_THREAD_ID ((thread_id_t) -1)

/* The mapping of vdo work onto threads: zone counts and per-role thread ids. */
struct thread_config {
	/* The number of zones of each zone type */
	zone_count_t logical_zone_count;
	zone_count_t physical_zone_count;
	zone_count_t hash_zone_count;
	/* The number of bio submission threads */
	thread_count_t bio_thread_count;
	/* The total number of threads */
	thread_count_t thread_count;
	/* The ids of the threads dedicated to a single role */
	thread_id_t admin_thread;
	thread_id_t journal_thread;
	thread_id_t packer_thread;
	thread_id_t dedupe_thread;
	thread_id_t bio_ack_thread;
	thread_id_t cpu_thread;
	/* Arrays of the thread ids assigned to each zone or bio queue */
	thread_id_t *logical_threads;
	thread_id_t *physical_threads;
	thread_id_t *hash_zone_threads;
	thread_id_t *bio_threads;
};
144 
struct thread_count_config;

/* The in-memory representation of the vdo's on-disk geometry block. */
struct vdo_geometry_block {
	/* The vio for reading and writing the geometry block to disk */
	struct vio vio;
	/* A buffer to hold the geometry block */
	u8 *buffer;
};
153 
/* The in-memory representation of the vdo's on-disk super block. */
struct vdo_super_block {
	/* The vio for reading and writing the super block to disk */
	struct vio vio;
	/* A buffer to hold the super block */
	u8 *buffer;
	/* Whether this super block may not be written */
	bool unwritable;
};
162 
struct data_vio_pool;

/* The state used to drive administrative operations on a vdo. */
struct vdo_administrator {
	/* The completion for performing administrative operations */
	struct vdo_completion completion;
	/* The administrative state of the vdo */
	struct admin_state state;
	/* Whether an administrative operation is in progress */
	atomic_t busy;
	/* The current phase of the operation in progress */
	u32 phase;
	/* Used to wait for a synchronous operation to complete */
	struct completion callback_sync;
};
172 
/*
 * The vdo device itself: its per-thread state, the saved states of all of its
 * components, and the in-memory structures which make up a single vdo instance.
 */
struct vdo {
	/* The prefix applied to the names of this vdo's work queue threads */
	char thread_name_prefix[MAX_VDO_WORK_QUEUE_NAME_LEN];
	/* The per-thread state of this vdo */
	struct vdo_thread *threads;
	/* An action and its completion used by vdo thread machinery (see vdo.c) */
	vdo_action_fn action;
	struct vdo_completion *completion;
	/* An optional tracer for vios */
	struct vio_tracer *vio_tracer;

	/* The atomic version of the state of this vdo */
	atomic_t state;
	/* The full state of all components */
	struct vdo_component_states states;
	/*
	 * A counter value to attach to thread names and log messages to identify the individual
	 * device.
	 */
	unsigned int instance;
	/* The read-only notifier */
	struct read_only_notifier read_only_notifier;
	/* The load-time configuration of this vdo */
	struct device_config *device_config;
	/* The thread mapping */
	struct thread_config thread_config;

	/* The geometry block */
	struct vdo_geometry_block geometry_block;

	/* The super block */
	struct vdo_super_block super_block;

	/* The partitioning of the underlying storage */
	struct layout layout;
	struct layout next_layout;
	struct dm_kcopyd_client *partition_copier;

	/* The block map */
	struct block_map *block_map;

	/* The journal for block map recovery */
	struct recovery_journal *recovery_journal;

	/* The slab depot */
	struct slab_depot *depot;

	/* The compressed-block packer */
	struct packer *packer;
	/* Whether incoming data should be compressed */
	bool compressing;

	/* The handler for flush requests */
	struct flusher *flusher;

	/* The state the vdo was in when loaded (primarily for unit tests) */
	enum vdo_state load_state;

	/* The logical zones of this vdo */
	struct logical_zones *logical_zones;

	/* The physical zones of this vdo */
	struct physical_zones *physical_zones;

	/* The hash lock zones of this vdo */
	struct hash_zones *hash_zones;

	/* Bio submission manager used for sending bios to the storage device. */
	struct io_submitter *io_submitter;

	/* The pool of data_vios for servicing incoming bios */
	struct data_vio_pool *data_vio_pool;

	/* The manager for administrative operations */
	struct vdo_administrator admin;

	/* Flags controlling administrative operations */
	const struct admin_state_code *suspend_type;
	bool allocations_allowed;
	bool dump_on_shutdown;
	bool needs_formatting;
	atomic_t processing_message;

	/*
	 * Statistics
	 * Atomic stats counters
	 */
	struct atomic_statistics stats;
	/* Used to gather statistics without allocating memory */
	struct vdo_statistics stats_buffer;
	/* Protects the stats_buffer */
	struct mutex stats_mutex;

	/* A list of all device_configs referencing this vdo */
	struct list_head device_config_list;

	/* This VDO's list entry for the device registry */
	struct list_head registration;

	/* Underlying block device info. */
	u64 starting_sector_offset;
	struct volume_geometry geometry;

	/* N blobs of context data for LZ4 code, one per CPU thread. */
	char **compression_context;
};
275 
276 /**
277  * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
278  *                            for acknowledging received and processed bios.
279  * @vdo: The vdo.
280  *
281  * Note that this directly controls the handling of write operations, but the compile-time flag
282  * VDO_USE_BIO_ACK_QUEUE_FOR_READ is also checked for read operations.
283  *
284  * Return: Whether a bio-acknowledgement work queue is in use.
285  */
vdo_uses_bio_ack_queue(struct vdo * vdo)286 static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
287 {
288 	return vdo->device_config->thread_counts.bio_ack_threads > 0;
289 }
290 
/**
 * typedef vdo_filter_fn - Method type for vdo matching methods.
 * @vdo: The vdo to match.
 * @context: A parameter for the filter to use.
 *
 * Return: True if the vdo matches the filter criteria, false if it doesn't.
 */
typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);

/* One-time initialization of the global vdo device registry */
void vdo_initialize_device_registry_once(void);

/* Search the device registry for a vdo accepted by the given filter */
struct vdo * __must_check vdo_find_matching(vdo_filter_fn filter, const void *context);
302 
/* Create the work queue for the vdo thread with the given id, of the given queue type */
int __must_check vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
				 const struct vdo_work_queue_type *type,
				 unsigned int queue_count, void *contexts[]);
306 
vdo_make_default_thread(struct vdo * vdo,thread_id_t thread_id)307 static inline int __must_check vdo_make_default_thread(struct vdo *vdo,
308 						       thread_id_t thread_id)
309 {
310 	return vdo_make_thread(vdo, thread_id, NULL, 1, NULL);
311 }
312 
/* Construction and destruction of a vdo */
int __must_check vdo_make(unsigned int instance, struct device_config *config,
			  char **reason, struct vdo **vdo_ptr);

void vdo_destroy(struct vdo *vdo);

/* Formatting, loading, and saving of on-disk metadata */
int __must_check vdo_format_components(struct vdo *vdo);

void vdo_format_super_block(struct vdo *vdo, struct vdo_completion *parent);

void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);

/* Accessors for the underlying device and administrative state */
struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);

const char * __must_check vdo_get_device_name(const struct dm_target *target);

int __must_check vdo_synchronous_flush(struct vdo *vdo);

const struct admin_state_code * __must_check vdo_get_admin_state(const struct vdo *vdo);

/* Compression controls */
bool vdo_set_compressing(struct vdo *vdo, bool enable);

bool vdo_get_compressing(struct vdo *vdo);

/* Gather the current statistics into the supplied buffer */
void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats);

/* The id of the vdo thread the caller is running on, or VDO_INVALID_THREAD_ID */
thread_id_t vdo_get_callback_thread_id(void);

/* The overall state of the vdo */
enum vdo_state __must_check vdo_get_state(const struct vdo *vdo);

void vdo_set_state(struct vdo *vdo, enum vdo_state state);

/* Layout and metadata persistence */
int vdo_clear_layout(struct vdo *vdo);
void vdo_save_geometry_block(struct vdo *vdo, struct vdo_completion *parent);
void vdo_save_super_block(struct vdo *vdo, struct vdo_completion *parent);

void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent);

/* Read-only mode management (see struct read_only_notifier above) */
int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
				    vdo_read_only_notification_fn notification,
				    thread_id_t thread_id);

int vdo_enable_read_only_entry(struct vdo *vdo);

void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent);

void vdo_allow_read_only_mode_entry(struct vdo_completion *parent);

void vdo_enter_read_only_mode(struct vdo *vdo, int error_code);

bool __must_check vdo_is_read_only(struct vdo *vdo);

bool __must_check vdo_in_read_only_mode(const struct vdo *vdo);

/* Recovery mode */
bool __must_check vdo_in_recovery_mode(const struct vdo *vdo);

void vdo_enter_recovery_mode(struct vdo *vdo);

/* Assertions that the caller is on a specific vdo thread */
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name);

void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
				       const char *name);

void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone,
					const char *name);

/* Look up the physical zone responsible for a given physical block number */
int __must_check vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
				       struct physical_zone **zone_ptr);

/* Dump debugging information about the vdo */
void vdo_dump_status(const struct vdo *vdo);
382 
383 #endif /* VDO_H */
384