xref: /linux/drivers/md/dm-vdo/logical-zone.c (revision a5f998094fa344cdd1342164948abb4d7c6101ce)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2023 Red Hat
4  */
5 
6 #include "logical-zone.h"
7 
8 #include "logger.h"
9 #include "memory-alloc.h"
10 #include "permassert.h"
11 #include "string-utils.h"
12 
13 #include "action-manager.h"
14 #include "admin-state.h"
15 #include "block-map.h"
16 #include "completion.h"
17 #include "constants.h"
18 #include "data-vio.h"
19 #include "flush.h"
20 #include "int-map.h"
21 #include "physical-zone.h"
22 #include "vdo.h"
23 
24 #define ALLOCATIONS_PER_ZONE 128
25 
26 /**
27  * as_logical_zone() - Convert a generic vdo_completion to a logical_zone.
28  * @completion: The completion to convert.
29  *
30  * Return: The completion as a logical_zone.
31  */
as_logical_zone(struct vdo_completion * completion)32 static struct logical_zone *as_logical_zone(struct vdo_completion *completion)
33 {
34 	vdo_assert_completion_type(completion, VDO_GENERATION_FLUSHED_COMPLETION);
35 	return container_of(completion, struct logical_zone, completion);
36 }
37 
38 /* get_thread_id_for_zone() - Implements vdo_zone_thread_getter_fn. */
get_thread_id_for_zone(void * context,zone_count_t zone_number)39 static thread_id_t get_thread_id_for_zone(void *context, zone_count_t zone_number)
40 {
41 	struct logical_zones *zones = context;
42 
43 	return zones->zones[zone_number].thread_id;
44 }
45 
46 /**
47  * initialize_zone() - Initialize a logical zone.
48  * @zones: The logical_zones to which this zone belongs.
49  * @zone_number: The logical_zone's index.
50  */
initialize_zone(struct logical_zones * zones,zone_count_t zone_number)51 static int initialize_zone(struct logical_zones *zones, zone_count_t zone_number)
52 {
53 	int result;
54 	struct vdo *vdo = zones->vdo;
55 	struct logical_zone *zone = &zones->zones[zone_number];
56 	zone_count_t allocation_zone_number;
57 
58 	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
59 	if (result != VDO_SUCCESS)
60 		return result;
61 
62 	if (zone_number < vdo->thread_config.logical_zone_count - 1)
63 		zone->next = &zones->zones[zone_number + 1];
64 
65 	vdo_initialize_completion(&zone->completion, vdo,
66 				  VDO_GENERATION_FLUSHED_COMPLETION);
67 	zone->zones = zones;
68 	zone->zone_number = zone_number;
69 	zone->thread_id = vdo->thread_config.logical_threads[zone_number];
70 	zone->block_map_zone = &vdo->block_map->zones[zone_number];
71 	INIT_LIST_HEAD(&zone->write_vios);
72 	vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
73 
74 	allocation_zone_number = zone->thread_id % vdo->thread_config.physical_zone_count;
75 	zone->allocation_zone = &vdo->physical_zones->zones[allocation_zone_number];
76 
77 	return vdo_make_default_thread(vdo, zone->thread_id);
78 }
79 
80 /**
81  * vdo_make_logical_zones() - Create a set of logical zones.
82  * @vdo: The vdo to which the zones will belong.
83  * @zones_ptr: A pointer to hold the new zones.
84  *
85  * Return: VDO_SUCCESS or an error code.
86  */
vdo_make_logical_zones(struct vdo * vdo,struct logical_zones ** zones_ptr)87 int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr)
88 {
89 	struct logical_zones *zones;
90 	int result;
91 	zone_count_t zone;
92 	zone_count_t zone_count = vdo->thread_config.logical_zone_count;
93 
94 	if (zone_count == 0)
95 		return VDO_SUCCESS;
96 
97 	result = vdo_allocate_extended(zone_count, zones, __func__, &zones);
98 	if (result != VDO_SUCCESS)
99 		return result;
100 
101 	zones->vdo = vdo;
102 	zones->zone_count = zone_count;
103 	for (zone = 0; zone < zone_count; zone++) {
104 		result = initialize_zone(zones, zone);
105 		if (result != VDO_SUCCESS) {
106 			vdo_free_logical_zones(zones);
107 			return result;
108 		}
109 	}
110 
111 	result = vdo_make_action_manager(zones->zone_count, get_thread_id_for_zone,
112 					 vdo->thread_config.admin_thread, zones, NULL,
113 					 vdo, &zones->manager);
114 	if (result != VDO_SUCCESS) {
115 		vdo_free_logical_zones(zones);
116 		return result;
117 	}
118 
119 	*zones_ptr = zones;
120 	return VDO_SUCCESS;
121 }
122 
123 /**
124  * vdo_free_logical_zones() - Free a set of logical zones.
125  * @zones: The set of zones to free.
126  */
vdo_free_logical_zones(struct logical_zones * zones)127 void vdo_free_logical_zones(struct logical_zones *zones)
128 {
129 	zone_count_t index;
130 
131 	if (zones == NULL)
132 		return;
133 
134 	vdo_free(vdo_forget(zones->manager));
135 
136 	for (index = 0; index < zones->zone_count; index++)
137 		vdo_int_map_free(vdo_forget(zones->zones[index].lbn_operations));
138 
139 	vdo_free(zones);
140 }
141 
assert_on_zone_thread(struct logical_zone * zone,const char * what)142 static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
143 {
144 	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
145 			    "%s() called on correct thread", what);
146 }
147 
148 /**
149  * check_for_drain_complete() - Check whether this zone has drained.
150  * @zone: The zone to check.
151  */
check_for_drain_complete(struct logical_zone * zone)152 static void check_for_drain_complete(struct logical_zone *zone)
153 {
154 	if (!vdo_is_state_draining(&zone->state) || zone->notifying ||
155 	    !list_empty(&zone->write_vios))
156 		return;
157 
158 	vdo_finish_draining(&zone->state);
159 }
160 
161 /** Implements vdo_admin_initiator_fn. */
initiate_drain(struct admin_state * state)162 static void initiate_drain(struct admin_state *state)
163 {
164 	check_for_drain_complete(container_of(state, struct logical_zone, state));
165 }
166 
167 /** Implements vdo_zone_action_fn. */
drain_logical_zone(void * context,zone_count_t zone_number,struct vdo_completion * parent)168 static void drain_logical_zone(void *context, zone_count_t zone_number,
169 			       struct vdo_completion *parent)
170 {
171 	struct logical_zones *zones = context;
172 
173 	vdo_start_draining(&zones->zones[zone_number].state,
174 			   vdo_get_current_manager_operation(zones->manager), parent,
175 			   initiate_drain);
176 }
177 
/**
 * vdo_drain_logical_zones() - Initiate a drain of all logical zones.
 * @zones: The set of zones to drain.
 * @operation: The admin state code describing the kind of drain to perform.
 * @parent: The completion to notify when the drain of every zone is complete.
 */
void vdo_drain_logical_zones(struct logical_zones *zones,
			     const struct admin_state_code *operation,
			     struct vdo_completion *parent)
{
	/* The action manager applies drain_logical_zone() to each zone on its own thread. */
	vdo_schedule_operation(zones->manager, operation, NULL, drain_logical_zone, NULL,
			       parent);
}
185 
186 /** Implements vdo_zone_action_fn. */
resume_logical_zone(void * context,zone_count_t zone_number,struct vdo_completion * parent)187 static void resume_logical_zone(void *context, zone_count_t zone_number,
188 				struct vdo_completion *parent)
189 {
190 	struct logical_zone *zone = &(((struct logical_zones *) context)->zones[zone_number]);
191 
192 	vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
193 }
194 
/**
 * vdo_resume_logical_zones() - Resume a set of logical zones.
 * @zones: The logical zones to resume.
 * @parent: The object to notify when the zones have resumed.
 */
void vdo_resume_logical_zones(struct logical_zones *zones, struct vdo_completion *parent)
{
	/* The action manager applies resume_logical_zone() to each zone on its own thread. */
	vdo_schedule_operation(zones->manager, VDO_ADMIN_STATE_RESUMING, NULL,
			       resume_logical_zone, NULL, parent);
}
205 
206 /**
207  * update_oldest_active_generation() - Update the oldest active generation.
208  * @zone: The zone.
209  *
210  * Return: true if the oldest active generation has changed.
211  */
update_oldest_active_generation(struct logical_zone * zone)212 static bool update_oldest_active_generation(struct logical_zone *zone)
213 {
214 	struct data_vio *data_vio =
215 		list_first_entry_or_null(&zone->write_vios, struct data_vio,
216 					 write_entry);
217 	sequence_number_t oldest =
218 		(data_vio == NULL) ? zone->flush_generation : data_vio->flush_generation;
219 
220 	if (oldest == zone->oldest_active_generation)
221 		return false;
222 
223 	WRITE_ONCE(zone->oldest_active_generation, oldest);
224 	return true;
225 }
226 
/**
 * vdo_increment_logical_zone_flush_generation() - Increment the flush generation in a logical
 *                                                 zone.
 * @zone: The logical zone.
 * @expected_generation: The expected value of the flush generation before the increment.
 */
void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
						 sequence_number_t expected_generation)
{
	assert_on_zone_thread(zone, __func__);
	VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
			    "logical zone %u flush generation %llu should be %llu before increment",
			    zone->zone_number, (unsigned long long) zone->flush_generation,
			    (unsigned long long) expected_generation);

	/* The new generation starts with no data_vios counted in it. */
	zone->flush_generation++;
	zone->ios_in_flush_generation = 0;
	/*
	 * Recompute the oldest generation still held by a write data_vio; with no holders it
	 * falls back to the new flush_generation.
	 */
	update_oldest_active_generation(zone);
}
246 
/**
 * vdo_acquire_flush_generation_lock() - Acquire the shared lock on a flush generation by a write
 *                                       data_vio.
 * @data_vio: The data_vio.
 */
void vdo_acquire_flush_generation_lock(struct data_vio *data_vio)
{
	struct logical_zone *zone = data_vio->logical.zone;

	assert_on_zone_thread(zone, __func__);
	VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");

	/*
	 * Tag the data_vio with the zone's current generation and append it to write_vios,
	 * keeping that list ordered by acquisition (and hence by generation).
	 */
	data_vio->flush_generation = zone->flush_generation;
	list_add_tail(&data_vio->write_entry, &zone->write_vios);
	zone->ios_in_flush_generation++;
}
263 
264 static void attempt_generation_complete_notification(struct vdo_completion *completion);
265 
266 /**
267  * notify_flusher() - Notify the flush that at least one generation no longer has active VIOs.
268  * @completion: The zone completion.
269  *
270  * This callback is registered in attempt_generation_complete_notification().
271  */
notify_flusher(struct vdo_completion * completion)272 static void notify_flusher(struct vdo_completion *completion)
273 {
274 	struct logical_zone *zone = as_logical_zone(completion);
275 
276 	vdo_complete_flushes(zone->zones->vdo->flusher);
277 	vdo_launch_completion_callback(completion,
278 				       attempt_generation_complete_notification,
279 				       zone->thread_id);
280 }
281 
/**
 * attempt_generation_complete_notification() - Notify the flusher if some generation no
 *                                              longer has active VIOs.
 * @completion: The zone completion.
 */
static void attempt_generation_complete_notification(struct vdo_completion *completion)
{
	struct logical_zone *zone = as_logical_zone(completion);

	assert_on_zone_thread(zone, __func__);
	/* The flusher has already been told about every generation up to this point. */
	if (zone->oldest_active_generation <= zone->notification_generation) {
		zone->notifying = false;
		check_for_drain_complete(zone);
		return;
	}

	/*
	 * Record the generation being announced before leaving this thread; notify_flusher()
	 * bounces back here afterwards in case the oldest generation advanced again.
	 */
	zone->notifying = true;
	zone->notification_generation = zone->oldest_active_generation;
	vdo_launch_completion_callback(&zone->completion, notify_flusher,
				       vdo_get_flusher_thread_id(zone->zones->vdo->flusher));
}
303 
/**
 * vdo_release_flush_generation_lock() - Release the shared lock on a flush generation held by a
 *                                       write data_vio.
 * @data_vio: The data_vio whose lock is to be released.
 *
 * If there are pending flushes, and this data_vio completes the oldest generation active in this
 * zone, an attempt will be made to finish any flushes which may now be complete.
 */
void vdo_release_flush_generation_lock(struct data_vio *data_vio)
{
	struct logical_zone *zone = data_vio->logical.zone;

	assert_on_zone_thread(zone, __func__);

	/* A data_vio which never acquired the generation lock has nothing to release. */
	if (!data_vio_has_flush_generation_lock(data_vio))
		return;

	list_del_init(&data_vio->write_entry);
	VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
			    "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
			    (unsigned long long) data_vio->flush_generation,
			    (unsigned long long) zone->oldest_active_generation);

	/*
	 * Only notify the flusher when removing this data_vio actually advanced the oldest
	 * active generation, and no notification is already in flight.
	 */
	if (!update_oldest_active_generation(zone) || zone->notifying)
		return;

	attempt_generation_complete_notification(&zone->completion);
}
332 
vdo_get_next_allocation_zone(struct logical_zone * zone)333 struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
334 {
335 	if (zone->allocation_count == ALLOCATIONS_PER_ZONE) {
336 		zone->allocation_count = 0;
337 		zone->allocation_zone = zone->allocation_zone->next;
338 	}
339 
340 	zone->allocation_count++;
341 	return zone->allocation_zone;
342 }
343 
344 /**
345  * vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging.
346  * @zone: The zone to dump.
347  *
348  * Context: the information is dumped in a thread-unsafe fashion.
349  *
350  */
vdo_dump_logical_zone(const struct logical_zone * zone)351 void vdo_dump_logical_zone(const struct logical_zone *zone)
352 {
353 	vdo_log_info("logical_zone %u", zone->zone_number);
354 	vdo_log_info("  flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu",
355 		     (unsigned long long) READ_ONCE(zone->flush_generation),
356 		     (unsigned long long) READ_ONCE(zone->oldest_active_generation),
357 		     (unsigned long long) READ_ONCE(zone->notification_generation),
358 		     vdo_bool_to_string(READ_ONCE(zone->notifying)),
359 		     (unsigned long long) READ_ONCE(zone->ios_in_flush_generation));
360 }
361