/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
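
/*
 * Illustration only (the real list comes from lockdep_states.h, which
 * currently defines the HARDIRQ and SOFTIRQ states): the x-macro above
 * expands roughly to
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,
 *	...
 *
 * followed by LOCK_USED, LOCK_USED_READ and the LOCK_USAGE_STATES count.
 */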

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
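
/*
 * Sketch of how these masks decompose an enum lock_usage_bit value, given
 * the four-entries-per-state ordering of the x-macro above: bit 0
 * distinguishes read from write usage, bit 1 distinguishes ENABLED from
 * USED_IN, and the remaining bits select the state. For example:
 *
 *	LOCK_ENABLED_HARDIRQ_READ & LOCK_USAGE_READ_MASK	-> non-zero
 *	LOCK_USED_IN_HARDIRQ & LOCK_USAGE_DIR_MASK		-> zero
 */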

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};

enum {
#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
	LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0,
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
	LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0,
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
	LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0,
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
	LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0,
#undef LOCKDEP_STATE
};

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
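
/*
 * Illustration only (assuming lockdep_states.h lists HARDIRQ and SOFTIRQ):
 * the enum above then builds, for example,
 *
 *	LOCKF_ENABLED_IRQ  == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ
 *	LOCKF_USED_IN_IRQ  == LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ
 *
 * and the helpers above combine these with their _READ counterparts.
 */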

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system boot-up
 * problems. So, reduce the static allocations for the lockdep-related
 * structures so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define AVG_LOCKDEP_CHAIN_DEPTH		5
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)
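
/*
 * Sizing example (assuming a !CONFIG_LOCKDEP_SMALL build with the default
 * CONFIG_LOCKDEP_CHAINS_BITS=16): MAX_LOCKDEP_CHAINS is 1UL << 16 = 65536
 * chains, so MAX_LOCKDEP_CHAIN_HLOCKS reserves 65536 * 5 = 327680 held-lock
 * slots, i.e. roughly 5 held locks per chain on average.
 */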

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
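
/*
 * Informal note: get_usage_chars() (in lockdep.c) fills usage[] with two
 * characters per tracked irq state (write and read usage) plus a trailing
 * '\0', which is what the "+ 1" in LOCK_USAGE_CHARS accounts for. The
 * characters themselves ('.', '-', '+', '?') are documented in
 * Documentation/locking/lockdep-design.rst.
 */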

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
extern unsigned long nr_dynamic_keys;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];
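
/*
 * Note (see the definition in lockdep.c): lock_classes_in_use is a bitmap
 * with one bit per lock_classes[] slot, marking which class entries are
 * currently allocated.
 */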

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
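
/*
 * Usage sketch (mirroring how lockdep.c and lockdep_proc.c use these):
 * writers bump the local CPU's counter, the reader sums over all CPUs, e.g.
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */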

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif