// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/math64.h>	/* div_u64() */
#include <asm/timex.h>

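/*
 * Each __param() invocation below declares a read-only (0444) module
 * parameter together with its description.  A hypothetical invocation
 * (the module name depends on how the kernel enables CONFIG_RBTREE_TEST):
 *
 *	insmod rbtree-test.ko nnodes=1000 perf_loops=100000 seed=42
 */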
#define __param(type, name, init, msg)		\
	static type name = init;		\
	module_param(name, type, 0444);		\
	MODULE_PARM_DESC(name, msg);

__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
__param(ullong, seed, 3141592653589793238ULL, "Random seed");

struct test_node {
	u32 key;
	struct rb_node rb;

	/* following fields used for testing augmented rbtree functionality */
	u32 val;
	u32 augmented;
};

static struct rb_root_cached root = RB_ROOT_CACHED;
static struct test_node *nodes;

static struct rnd_state rnd;

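/*
 * Plain rbtree insertion: descend from the root comparing keys, link the
 * new node at the empty slot found, then let rb_insert_color() rebalance.
 */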
static void insert(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color(&node->rb, &root->rb_root);
}

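/*
 * As insert(), but also tracks whether the new node ends up leftmost so
 * rb_insert_color_cached() can maintain the cached leftmost pointer.
 */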
static void insert_cached(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;
	bool leftmost = true;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else {
			new = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color_cached(&node->rb, root, leftmost);
}

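/* Thin wrappers around rb_erase() / rb_erase_cached(). */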
static inline void erase(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase(&node->rb, &root->rb_root);
}

static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}

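/*
 * RB_DECLARE_CALLBACKS_MAX() generates augment_callbacks, the
 * propagate/copy/rotate callbacks that keep ->augmented equal to the
 * maximum NODE_VAL() over each node's subtree.
 */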
#define NODE_VAL(node) ((node)->val)

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct test_node, rb, u32, augmented, NODE_VAL)

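/*
 * Augmented insertion: while descending, bump ->augmented on every
 * ancestor whose maximum the new value exceeds, then insert through the
 * callbacks so rebalancing keeps the augmented data consistent.
 */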
static void insert_augmented(struct test_node *node,
			     struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else
			new = &parent->rb.rb_right;
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

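/* As insert_augmented(), plus leftmost tracking for the cached root. */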
static void insert_augmented_cached(struct test_node *node,
				    struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;
	bool leftmost = true;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else {
			new = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented_cached(&node->rb, root,
				   leftmost, &augment_callbacks);
}

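/* Erase variants that fix up the augmented data through the callbacks. */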
static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

static void erase_augmented_cached(struct test_node *node,
				   struct rb_root_cached *root)
{
	rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
}

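/* (Re)populate the node array with pseudorandom keys and values. */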
static void init(void)
{
	int i;

	for (i = 0; i < nnodes; i++) {
		nodes[i].key = prandom_u32_state(&rnd);
		nodes[i].val = prandom_u32_state(&rnd);
	}
}

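/*
 * The node color lives in bit 0 of __rb_parent_color
 * (RB_RED == 0, RB_BLACK == 1), so a clear bit means red.
 */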
static bool is_red(struct rb_node *rb)
{
	return !(rb->__rb_parent_color & 1);
}

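/* Count the black nodes from @rb up to and including the root. */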
static int black_path_count(struct rb_node *rb)
{
	int count;

	for (count = 0; rb; rb = rb_parent(rb))
		count += !is_red(rb);
	return count;
}

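/* Both postorder checks verify the iteration visits exactly nr_nodes nodes. */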
static void check_postorder_foreach(int nr_nodes)
{
	struct test_node *cur, *n;
	int count = 0;

	rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}

static void check_postorder(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0;

	for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}

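/*
 * Walk the tree in order and check the red-black invariants:
 *  - keys appear in nondecreasing order;
 *  - no red node has a red parent;
 *  - every path from a node with a missing child up to the root
 *    crosses the same number of black nodes;
 *  - with black height bh, the tree holds at least 2^bh - 1 nodes.
 */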
static void check(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0, blacks = 0;
	u32 prev_key = 0;

	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);

		WARN_ON_ONCE(node->key < prev_key);
		WARN_ON_ONCE(is_red(rb) &&
			     (!rb_parent(rb) || is_red(rb_parent(rb))));
		if (!count)
			blacks = black_path_count(rb);
		else
			WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
				     blacks != black_path_count(rb));
		prev_key = node->key;
		count++;
	}

	WARN_ON_ONCE(count != nr_nodes);
	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);

	check_postorder(nr_nodes);
	check_postorder_foreach(nr_nodes);
}

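/*
 * On top of check(), verify that each node's ->augmented equals the
 * maximum ->val in its subtree, computed locally from the node itself
 * and its children's already-checked ->augmented values.
 */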
static void check_augmented(int nr_nodes)
{
	struct rb_node *rb;

	check(nr_nodes);
	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		u32 subtree, max = node->val;

		if (node->rb.rb_left) {
			subtree = rb_entry(node->rb.rb_left, struct test_node,
					   rb)->augmented;
			if (max < subtree)
				max = subtree;
		}
		if (node->rb.rb_right) {
			subtree = rb_entry(node->rb.rb_right, struct test_node,
					   rb)->augmented;
			if (max < subtree)
				max = subtree;
		}
		WARN_ON_ONCE(node->augmented != max);
	}
}

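/*
 * Cycle-count micro-benchmarks (averaged over perf_loops) for the plain
 * and cached operations, followed by check_loops rounds of invariant
 * verification around every insert and erase.
 */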
static int basic_check(void)
{
	int i, j;
	cycles_t time1, time2, time;
	struct rb_node *node;

	printk(KERN_ALERT "rbtree testing\n");

	init();

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_cached(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_cached(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
	       (unsigned long long)time);

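	/* Keep the tree populated for the traversal and first-node tests. */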
	for (i = 0; i < nnodes; i++)
		insert(nodes + i, &root);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (node = rb_first(&root.rb_root); node; node = rb_next(node))
			;
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first(&root.rb_root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 4 (latency to fetch first node)\n");
	printk("        non-cached: %llu cycles\n", (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first_cached(&root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk("        cached: %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < nnodes; i++)
		erase(nodes + i, &root);

	/* correctness checks: validate the invariants around every insert and erase */
	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check(j);
			insert(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check(nnodes - j);
			erase(nodes + j, &root);
		}
		check(0);
	}

	return 0;
}

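/*
 * Same benchmark/verification structure as basic_check(), exercising
 * the augmented API.
 */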
static int augmented_check(void)
{
	int i, j;
	cycles_t time1, time2, time;

	printk(KERN_ALERT "augmented rbtree testing\n");

	init();

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_augmented(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_augmented(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_augmented_cached(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_augmented_cached(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check_augmented(j);
			insert_augmented(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check_augmented(nnodes - j);
			erase_augmented(nodes + j, &root);
		}
		check_augmented(0);
	}

	return 0;
}

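/*
 * Module entry point: allocate the node array, seed the PRNG, run both
 * suites, free the array, and return -EAGAIN so the module does not
 * stay loaded once the tests have run.
 */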
static int __init rbtree_test_init(void)
{
	nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	prandom_seed_state(&rnd, seed);

	basic_check();
	augmented_check();

	kfree(nodes);

	return -EAGAIN; /* failing the init call unloads the module right away */
}

static void __exit rbtree_test_exit(void)
{
	printk(KERN_ALERT "test exit\n");
}

module_init(rbtree_test_init)
module_exit(rbtree_test_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Red Black Tree test");