/* linux/kernel/locking/spinlock_debug.c */
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pid.h>

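/*
 * Usage sketch (illustrative only, not part of the original file; the
 * names below are made up): with CONFIG_DEBUG_SPINLOCK enabled, the
 * ordinary spin_lock()/spin_unlock() wrappers end up calling the
 * do_raw_spin_*() helpers in this file, so a plain caller like this gets
 * the magic/owner/recursion checks without any changes:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	static void example_critical_section(void)
 *	{
 *		spin_lock(&example_lock);
 *		... protected work ...
 *		spin_unlock(&example_lock);
 *	}
 */
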
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

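/*
 * Sketch of the misuse the CONFIG_DEBUG_LOCK_ALLOC check above is meant
 * to catch (illustrative only, made-up names): re-running lock
 * initialization on a lock that may still be held wipes the owner/magic
 * state, and is the kind of sequence these checks are there to flag:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	spin_lock(&example_lock);
 *	spin_lock_init(&example_lock);	-- re-init while held: flagged
 *	spin_unlock(&example_lock);
 */
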
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
#endif

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

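/*
 * A minimal sketch (made-up names, not part of this file) of the kind of
 * bug the "already unlocked" and "wrong owner" checks above are meant to
 * flag: unlocking the same lock twice, or unlocking from a task that never
 * acquired it.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	spin_lock(&example_lock);
 *	spin_unlock(&example_lock);
 *	spin_unlock(&example_lock);	-- second unlock trips the check
 */
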
/*
 * We are now relying on the NMI watchdog to detect lockups instead of doing
 * the detection here with an unfair lock which can cause problems of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret) {
		mmiowb_spin_lock();
		debug_spin_lock_after(lock);
	}
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

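/*
 * Typical trylock pattern the UP check above relates to (illustrative
 * only, the helpers are hypothetical). On SMP a failed trylock simply
 * means another CPU holds the lock and the caller falls back to other
 * work; on a UP build, where no other CPU can be holding the lock, a
 * failure is assumed to indicate misuse or corruption, hence the
 * SPIN_BUG_ON().
 *
 *	if (spin_trylock(&example_lock)) {
 *		do_fast_path();		-- hypothetical helper
 *		spin_unlock(&example_lock);
 *	} else {
 *		defer_work();		-- hypothetical helper
 *	}
 */
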
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	mmiowb_spin_unlock();
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

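/*
 * The read-side helpers below only validate the lock's magic value: a
 * reader lock may be held by several CPUs at once, so the single
 * owner/owner_cpu fields used for the write-side checks further down do
 * not apply to readers.
 */
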
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}

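/*
 * Sketch of a misuse the write-side "recursion" check above catches
 * (illustrative only, made-up names): rwlock_t write locks are not
 * recursive, so taking the same write lock twice from one task is flagged
 * by rwlock_bug() before the task deadlocks on itself.
 *
 *	static DEFINE_RWLOCK(example_rwlock);
 *
 *	write_lock(&example_rwlock);
 *	write_lock(&example_rwlock);	-- flagged as "recursion"
 *	write_unlock(&example_rwlock);
 */
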
#endif /* !CONFIG_PREEMPT_RT */