#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>

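/*
 * Atomically perform the operation encoded in encoded_op on the user word
 * at uaddr and, if that succeeds, return the result of comparing the word's
 * previous value against cmparg.
 *
 * The layout of encoded_op mirrors the decode below (and the FUTEX_OP()
 * macro in <linux/futex.h>): bits 28-31 hold the operation (bit 31 being
 * the FUTEX_OP_OPARG_SHIFT flag), bits 24-27 the comparison, bits 12-23 the
 * sign-extended operation argument and bits 0-11 the comparison argument.
 */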
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
	unsigned long flags;
	u32 val;
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
		return -EFAULT;

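	/*
	 * Page faults are disabled and interrupts are off for the whole
	 * hashed-spinlock critical section below, so the get_user()/
	 * put_user() calls cannot sleep; they simply return -EFAULT if the
	 * user page is not resident.
	 */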
	pagefault_disable();

	_atomic_spin_lock_irqsave(uaddr, flags);

	switch (op) {
	case FUTEX_OP_SET:
		/* *(int *)UADDR2 = OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret)
			ret = put_user(oparg, uaddr);
		break;
	case FUTEX_OP_ADD:
		/* *(int *)UADDR2 += OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval + oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_OR:
		/* *(int *)UADDR2 |= OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval | oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_ANDN:
		/* *(int *)UADDR2 &= ~OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval & ~oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_XOR:
		/* *(int *)UADDR2 ^= OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval ^ oparg;
			ret = put_user(val, uaddr);
		}
		break;
	default:
		ret = -ENOSYS;
	}

	_atomic_spin_unlock_irqrestore(uaddr, flags);

	pagefault_enable();

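	/*
	 * The operation itself succeeded; the return value is now the
	 * result of applying the requested comparison to the value that
	 * was in *uaddr before the operation.
	 */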
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}
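
/*
 * Illustrative example (not part of this header): the encoded_op argument
 * is the FUTEX_WAKE_OP val3 word, typically built by userspace with the
 * FUTEX_OP() macro from <linux/futex.h>.  For instance
 *
 *	FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0)
 *
 * asks this routine to add 1 to the user word and report whether the
 * word's previous value was greater than 0.
 */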

/*
 * cmpxchg on a user word, emulated with the hashed spinlock: PA-RISC has
 * no atomic compare-and-exchange instruction.
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	int ret;
	u32 val;
	unsigned long flags;

	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
	 * our gateway page, and causes no end of trouble...
	 */
	if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
		return -EFAULT;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* HPPA has no cmpxchg in hardware and therefore the
	 * best we can do here is use an array of locks. The
	 * lock selected is based on a hash of the userspace
	 * address. This should scale to a couple of CPUs.
	 */

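	/*
	 * Roughly speaking (a sketch only; the real macro lives in
	 * <asm/atomic.h> and its exact names and hash are assumptions here),
	 * _atomic_spin_lock_irqsave() picks its lock along the lines of:
	 *
	 *	arch_spinlock_t *s = &__atomic_hash[((unsigned long)uaddr /
	 *				L1_CACHE_BYTES) % ATOMIC_HASH_SIZE];
	 *	local_irq_save(flags);
	 *	arch_spin_lock(s);
	 *
	 * so two accesses to the same user word always contend on the same
	 * spinlock, which is what makes the read-compare-write below atomic.
	 */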
	_atomic_spin_lock_irqsave(uaddr, flags);

	ret = get_user(val, uaddr);

	if (!ret && val == oldval)
		ret = put_user(newval, uaddr);

	*uval = val;

	_atomic_spin_unlock_irqrestore(uaddr, flags);

	return ret;
}

#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/