/*
 * System level definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/hexagon_vm.h>

struct thread_struct;

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *,
	struct task_struct *);

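/*
 * Three-argument context switch, following the usual kernel
 * switch_to(prev, next, last) contract: "last" receives the task we
 * actually switched away from, as returned by __switch_to().
 */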
#define switch_to(p, n, r) do {\
	r = __switch_to((p), (n), (r));\
} while (0)

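/*
 * Memory barriers.  All flavors below currently reduce to the
 * compiler-only barrier(); no explicit hardware ordering instruction
 * is emitted.
 */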
#define rmb()				barrier()
#define read_barrier_depends()		barrier()
#define wmb()				barrier()
#define mb()				barrier()
#define smp_rmb()			barrier()
#define smp_read_barrier_depends()	barrier()
#define smp_wmb()			barrier()
#define smp_mb()			barrier()
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/*
 * __xchg - atomically exchange a register and a memory location
 * @x: value to swap
 * @ptr: pointer to memory
 * @size: size of the value
 *
 * Only 4 bytes are supported currently.
 *
 * Note: there was an erratum on V2 involving .new forms and
 * memw_locked.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long retval;

	/*  Can't seem to use printk or panic here, so just stop  */
	if (size != 4) do { asm volatile("brkpt;\n"); } while (1);

	__asm__ __volatile__ (
	"1:	%0 = memw_locked(%1);\n"    /*  load into retval */
	"	memw_locked(%1,P0) = %2;\n" /*  store into memory */
	"	if (!P0) jump 1b;\n"
	: "=&r" (retval)
	: "r" (ptr), "r" (x)
	: "memory", "p0"
	);
	return retval;
}

/*
 * Atomically swap the contents of a register with memory.  Should be atomic
 * between multiple CPUs and within interrupts on the same CPU.
 */
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
	sizeof(*(ptr))))
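
/*
 * Illustrative sketch (not part of the original header): one way a
 * caller might use xchg() as a simple test-and-set style claim on a
 * word-sized flag.  The variable name "pending" is hypothetical.
 *
 *	static unsigned long pending;
 *
 *	if (xchg(&pending, 1) == 0) {
 *		// we were the first to set the flag; do the work
 *	}
 */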

/*  Set a value, then issue a memory barrier.  Used by the scheduler.  */
#define set_mb(var, value) \
	do { var = value; mb(); } while (0)

/*
 * See rt-mutex-design.txt: cmpxchg() compares *ptr with old and, only if
 * they are equal, stores new; the previous value of *ptr is returned
 * either way.  On this arch it looks just like atomic_cmpxchg() with a
 * bunch of variable casting.
 */
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)					\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __oldval = 0;			\
								\
	asm volatile(						\
		"1:	%0 = memw_locked(%1);\n"		\
		"	{ P0 = cmp.eq(%0,%2);\n"		\
		"	  if (!P0.new) jump:nt 2f; }\n"		\
		"	memw_locked(%1,p0) = %3;\n"		\
		"	if (!P0) jump 1b;\n"			\
		"2:\n"						\
		: "=&r" (__oldval)				\
		: "r" (__ptr), "r" (__old), "r" (__new)		\
		: "memory", "p0"				\
	);							\
	__oldval;						\
})
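
/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on cmpxchg().  "counter" and
 * "add_sketch" are hypothetical names used only for this example.
 *
 *	static int counter;
 *
 *	static void add_sketch(int amount)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = counter;
 *			new = old + amount;
 *		} while (cmpxchg(&counter, old, new) != old);
 *	}
 */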

/*  Should probably shoot for an 8-byte aligned stack pointer  */
#define STACK_MASK (~7)
#define arch_align_stack(x) ((x) & STACK_MASK)
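
/*
 * Worked example (illustrative only): with STACK_MASK == ~7, the mask
 * rounds an address down to the previous 8-byte boundary, e.g.
 *
 *	arch_align_stack(0x1003) == 0x1000
 *	arch_align_stack(0x1008) == 0x1008
 */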

#endif