/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_32_H
#define _ASM_X86_MUTEX_32_H

#include <asm/alternative.h>

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"		\
		     "   jns 1f	\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:\n"					\
		     : "=a" (dummy)				\
		     : "a" (count)				\
		     : "memory", "ecx", "edx");			\
} while (0)
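
/*
 * Illustrative C-level sketch of the fastpath above (an assumption for
 * clarity, not kernel code): the locked decl sets the sign flag from
 * the new value, and jns skips the failure call while the result is
 * still non-negative. In C this is roughly:
 *
 *	if (atomic_dec_return(count) < 0)
 *		fail_fn(count);
 *
 * The real fastpath stays in asm so that <fail_fn> is reached with the
 * lock address still live in %eax, matching the regparm calling
 * convention used on 32-bit x86.
 */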

/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count,
					       int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
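
/*
 * Usage sketch (illustrative assumption; the helper name below is the
 * one used by the generic mutex code of this era, shown only as an
 * example): the caller passes its slowpath as fail_fn, roughly:
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count,
 *					   __mutex_lock_interruptible_slowpath);
 *
 * so a contended lock returns whatever the slowpath returns (e.g.
 * -EINTR for the interruptible variant), and 0 on the fast path.
 */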

/**
 *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; it needs
 * to return 0 otherwise.
 */
#define __mutex_fastpath_unlock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%eax)\n"		\
		     "   jg	1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:\n"					\
		     : "=a" (dummy)				\
		     : "a" (count)				\
		     : "memory", "ecx", "edx");			\
} while (0)
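
/*
 * Illustrative C-level sketch of the unlock fastpath above (an
 * assumption for clarity, not kernel code): jg skips the failure call
 * only when the incremented count is positive, i.e. nobody is waiting;
 * otherwise the slowpath must wake a waiter:
 *
 *	if (atomic_inc_return(count) <= 0)
 *		fail_fn(count);
 */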

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state.  It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient as (and simpler than) a 'destructive' probing
	 * of the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}
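
/*
 * Usage sketch (illustrative assumption; the slowpath name below is
 * only an example of what the generic code would pass as fail_fn):
 *
 *	ret = __mutex_fastpath_trylock(&lock->count,
 *				       __mutex_trylock_slowpath);
 *
 * where a nonzero ret means the cmpxchg moved the count from 1 to 0
 * and the mutex is now held.
 */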

#endif /* _ASM_X86_MUTEX_32_H */