// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks that enforce
 * rules allowing code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
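
/*
 * Illustrative sketch (not part of this file): a semaphore is declared in
 * the caller's own code and initialized with sema_init() before use.  The
 * name 'foo_sem' and the count of 2 are made up for the example.
 *
 *	static struct semaphore foo_sem;
 *
 *	sema_init(&foo_sem, 2);	// up to two tasks may hold it at once
 */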

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>
#include <linux/hung_task.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);

#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
static inline void hung_task_sem_set_holder(struct semaphore *sem)
{
	WRITE_ONCE((sem)->last_holder, (unsigned long)current);
}

static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
{
	if (READ_ONCE((sem)->last_holder) == (unsigned long)current)
		WRITE_ONCE((sem)->last_holder, 0UL);
}

unsigned long sem_last_holder(struct semaphore *sem)
{
	return READ_ONCE(sem->last_holder);
}
#else
static inline void hung_task_sem_set_holder(struct semaphore *sem)
{
}
static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
{
}
unsigned long sem_last_holder(struct semaphore *sem)
{
	return 0UL;
}
#endif

static inline void __sem_acquire(struct semaphore *sem)
{
	sem->count--;
	hung_task_sem_set_holder(sem);
}

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
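
/*
 * Illustrative sketch (not part of this file): a typical down()/up()
 * pairing around a critical section.  'foo_sem' is a made-up name; in
 * new code, prefer down_interruptible() or down_killable() as noted
 * above.
 *
 *	down(&foo_sem);
 *	// ... access the resource protected by foo_sem ...
 *	up(&foo_sem);
 */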

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
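
/*
 * Illustrative sketch (not part of this file): propagating -EINTR when a
 * signal interrupts the wait.  'foo_sem' is a made-up name.
 *
 *	if (down_interruptible(&foo_sem))
 *		return -EINTR;
 *	// ... critical section ...
 *	up(&foo_sem);
 */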

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
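
/*
 * Illustrative sketch (not part of this file): down_killable() is like
 * down_interruptible(), but only a fatal signal wakes the sleeper, which
 * suits waits that should not be disturbed by ordinary signals.
 *
 *	if (down_killable(&foo_sem))
 *		return -EINTR;	// task received a fatal signal
 *	// ... critical section ...
 *	up(&foo_sem);
 */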

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int __sched down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		__sem_acquire(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
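
/*
 * Illustrative sketch (not part of this file): note the inverted sense of
 * the return value compared with spin_trylock() and mutex_trylock(); here
 * 0 means the semaphore WAS acquired.
 *
 *	if (down_trylock(&foo_sem))
 *		return -EBUSY;	// could not acquire without sleeping
 *	// ... critical section ...
 *	up(&foo_sem);
 */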

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
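
/*
 * Illustrative sketch (not part of this file): the timeout is given in
 * jiffies, so callers usually convert from human units first.
 *
 *	if (down_timeout(&foo_sem, msecs_to_jiffies(100)))
 *		return -ETIME;	// not released within ~100ms
 *	// ... critical section ...
 *	up(&foo_sem);
 */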

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->lock, flags);

	hung_task_sem_clear_if_holder(sem);

	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem, &wake_q);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);
}
EXPORT_SYMBOL(up);
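
/*
 * Illustrative sketch (not part of this file): because up() may be called
 * from any context, an interrupt handler can release a semaphore that a
 * task is sleeping on, completion-style.  'foo_irq_handler' and 'foo_sem'
 * are made-up names.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *dev)
 *	{
 *		up(&foo_sem);	// wake the task blocked in down()
 *		return IRQ_HANDLED;
 *	}
 */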

/* Functions for the contended case */

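/*
 * One of these lives on the stack of each sleeping task in __down_common();
 * it is queued on sem->wait_list, and its 'up' flag tells the woken task
 * whether it was granted the semaphore or woke for another reason.
 */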
struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up) {
			hung_task_sem_set_holder(sem);
			return 0;
		}
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	hung_task_set_blocker(sem, BLOCKER_TYPE_SEM);

	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	hung_task_clear_blocker();

	return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

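/*
 * Grant the semaphore to the first (i.e. longest-waiting) task on the wait
 * list, so wakeups are FIFO.  Called with sem->lock held; the actual wakeup
 * is deferred via the wake_q until up() has dropped the lock.
 */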
static noinline void __sched __up(struct semaphore *sem,
				  struct wake_q_head *wake_q)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_q_add(wake_q, waiter->task);
}