/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03 /* No preemption/interrupt handling */
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
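
/*
 * Illustrative sketch (not part of the API; variable names are
 * hypothetical): a board with two hwspinlock banks of 32 locks each could
 * hand out non-overlapping id ranges via platform data:
 *
 *	static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *	static struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 */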

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
							unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
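
/*
 * Illustrative consumer sketch (error handling abbreviated; assumes the
 * consumer's device-tree node carries an "hwlocks" phandle at index 0):
 *
 *	int id;
 *	struct hwspinlock *hwlock;
 *
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;
 *	hwlock = devm_hwspin_lock_request_specific(dev, id);
 *	if (!hwlock)
 *		return -EBUSY;
 */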

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, user code will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, who care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	return 0;
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
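
/*
 * Illustrative usage sketch (not part of the API): a successful trylock
 * must be paired with hwspin_unlock_irqrestore(), passing the same
 * @flags pointer.
 *
 *	unsigned long flags;
 *
 *	if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *		...	short critical section, irqs and preemption disabled
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */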

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must serialize acquisition of the hardware lock with
 * a mutex or a spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
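
/*
 * Illustrative sketch for the raw mode (the mutex name is hypothetical):
 * a local mutex serializes callers on this processor, so the critical
 * section may sleep while the hardware lock is held.
 *
 *	static DEFINE_MUTEX(my_hwlock_mutex);
 *
 *	mutex_lock(&my_hwlock_mutex);
 *	if (!hwspin_trylock_raw(hwlock)) {
 *		...	time-consuming or sleepable work under the hw lock
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&my_hwlock_mutex);
 */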

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as
 * possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
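
/*
 * Illustrative usage sketch (the 100 msec timeout is arbitrary): busy-wait
 * for up to 100 msecs, then release with hwspin_unlock_irqrestore().
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *	if (ret)
 *		return ret;
 *	...	short critical section, irqs and preemption disabled
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */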

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must serialize acquisition of the hardware lock with
 * a mutex or a spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
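
/*
 * Illustrative sketch (lock names and the 5 msec timeout are hypothetical):
 * in the in_atomic mode the caller is already in atomic context, e.g. under
 * its own spinlock, so the framework touches neither preemption nor irqs.
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	if (!hwspin_lock_timeout_in_atomic(hwlock, 5)) {
 *		...	very short critical section
 *		hwspin_unlock_in_atomic(hwlock);
 *	}
 *	spin_unlock_irqrestore(&my_lock, flags);
 */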

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
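
/*
 * Illustrative usage sketch (the 100 msec timeout is arbitrary): the plain
 * variant only disables preemption, so local interrupts may still fire
 * while the lock is held.
 *
 *	int ret;
 *
 *	ret = hwspin_lock_timeout(hwlock, 100);
 *	if (ret)
 *		return ret;
 *	...	short critical section, preemption disabled
 *	hwspin_unlock(hwlock);
 */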

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: the caller's previously-saved interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
							unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */