xref: /linux/arch/parisc/kernel/syscall.S (revision 0905809b38bda1fa0b206986c44d846e46f13c1d)
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * System call entry code / Linux gateway page
 * Copyright (c) Matthew Wilcox 1999 <willy@infradead.org>
 * Licensed under the GNU GPL.
 * thanks to Philipp Rumpf, Mike Shaver and various others
 * sorry about the wall, puffin..
 */
10
/*
How does the Linux gateway page on PA-RISC work?
------------------------------------------------
The Linux gateway page on PA-RISC is "special".
It actually has PAGE_GATEWAY bits set (this is Linux terminology; in parisc
terminology it's Execute, promote to PL0) in the page map.  So anything
executing on this page executes with kernel level privilege (there's more to it
than that: to have this happen, you also have to use a branch with a ,gate
completer to activate the privilege promotion).  The upshot is that everything
that runs on the gateway page runs at kernel privilege but with the current
user process address space (although you have access to kernel space via %sr2).
For the 0x100 syscall entry, we redo the space registers to point to the kernel
address space (preserving the user address space in %sr3), move to wide mode if
required, save the user registers and branch into the kernel syscall entry
point.  For all the other functions, we execute at kernel privilege but don't
flip address spaces.  The net effect is that these code snippets are executed
atomically (because the kernel can't be pre-empted) and they may perform
architecturally forbidden (to PL3) operations (like setting control
registers).
*/
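
/*
A rough sketch (not from this file) of how userspace typically reaches the
0x100 entry point described above.  It assumes the usual parisc Linux
convention: syscall number in %r20, arguments in %r26..%r21, result in %r28
(error values come back as -errno), a gated "ble 0x100(%sr2, %r0)" branch, and
the return address left in %r31 by ble.  See glibc for the real implementation.

	#include <asm/unistd.h>

	static inline long raw_getpid(void)
	{
		register unsigned long num asm("r20") = __NR_getpid;
		register long ret asm("r28");

		asm volatile("ble 0x100(%%sr2, %%r0)\n\t"
			     "nop"		// delay slot
			     : "=r" (ret), "+r" (num)
			     :
			     : "r1", "r2", "r19", "r21", "r22", "r23", "r24",
			       "r25", "r26", "r27", "r29", "r31", "memory");
		return ret;
	}
*/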
31
32
33#include <asm/asm-offsets.h>
34#include <asm/unistd.h>
35#include <asm/errno.h>
36#include <asm/page.h>
37#include <asm/psw.h>
38#include <asm/thread_info.h>
39#include <asm/assembly.h>
40#include <asm/processor.h>
41#include <asm/cache.h>
42#include <asm/spinlock_types.h>
43
44#include <linux/linkage.h>
45
46	/* We fill the empty parts of the gateway page with
47 	 * something that will kill the kernel or a
48 	 * userspace application.
49	 */
50#define KILL_INSN	break	0,0
51
52	.level          PA_ASM_LEVEL
53
54	.macro	lws_pagefault_disable reg1,reg2
55	mfctl	%cr30, \reg2
56	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
57	ldw	0(%sr2,\reg2), \reg1
58	ldo	1(\reg1), \reg1
59	stw	\reg1, 0(%sr2,\reg2)
60	.endm
61
62	.macro	lws_pagefault_enable reg1,reg2
63	mfctl	%cr30, \reg2
64	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
65	ldw	0(%sr2,\reg2), \reg1
66	ldo	-1(\reg1), \reg1
67	stw	\reg1, 0(%sr2,\reg2)
68	.endm
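
	/* Roughly, in C terms, the two macros above adjust the same per-task
	 * counter that pagefault_disable()/pagefault_enable() use (a sketch;
	 * TASK_PAGEFAULT_DISABLED is its offset from the %cr30 task pointer):
	 *
	 *	current->pagefault_disabled++;	// lws_pagefault_disable
	 *	current->pagefault_disabled--;	// lws_pagefault_enable
	 *
	 * so a fault inside the LWS critical region is handled via the
	 * exception tables below instead of sleeping in the fault handler.
	 */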
69
70	/* raise exception if spinlock content is not zero or
71	 * __ARCH_SPIN_LOCK_UNLOCKED_VAL */
72	.macro	spinlock_check spin_val,tmpreg
73#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
74	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
75	andcm,=	\spin_val, \tmpreg, %r0
76	.word	SPINLOCK_BREAK_INSN
77#endif
78	.endm
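
	/* The andcm,= above nullifies the SPINLOCK_BREAK_INSN whenever
	 * (\spin_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) == 0, so the break
	 * (and the resulting trap) only fires for unexpected lock word values.
	 */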
79
80	.text
81
82	.import syscall_exit,code
83	.import syscall_exit_rfi,code
84
85	/* Linux gateway page is aliased to virtual page 0 in the kernel
86	 * address space. Since it is a gateway page it cannot be
87	 * dereferenced, so null pointers will still fault. We start
88	 * the actual entry point at 0x100. We put break instructions
89	 * at the beginning of the page to trap null indirect function
90	 * pointers.
91	 */
92
93	.align PAGE_SIZE
94ENTRY(linux_gateway_page)
95
96        /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
97	.rept 44
98	KILL_INSN
99	.endr
100
101	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
102	/* Light-weight-syscall entry must always be located at 0xb0 */
103	/* WARNING: Keep this number updated with table size changes */
104#define __NR_lws_entries (5)
105
106lws_entry:
107	gate	lws_start, %r0		/* increase privilege */
108	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */
109
110	/* Fill from 0xb8 to 0xe0 */
111	.rept 10
112	KILL_INSN
113	.endr
114
115	/* This function MUST be located at 0xe0 for glibc's threading
116	mechanism to work. DO NOT MOVE THIS CODE EVER! */
117set_thread_pointer:
118	gate	.+8, %r0		/* increase privilege */
119	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */
120	be	0(%sr7,%r31)		/* return to user space */
121	mtctl	%r26, %cr27		/* move arg0 to the control register */
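
	/* Userspace enters the routine above with the new TLS pointer in %r26;
	 * it ends up in %cr27, which can later be read back from user mode.
	 * A rough sketch of the calling side (see glibc for the real one):
	 *
	 *	static inline void set_tls(void *tp)
	 *	{
	 *		asm volatile("ble 0xe0(%%sr2, %%r0)\n\t"
	 *			     "copy %0, %%r26"	// delay slot loads arg0
	 *			     : : "r" (tp)
	 *			     : "r26", "r31", "memory");
	 *	}
	 */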
122
123	/* Increase the chance of trapping if random jumps occur to this
124	address, fill from 0xf0 to 0x100 */
125	.rept 4
126	KILL_INSN
127	.endr
128
129/* This address must remain fixed at 0x100 for glibc's syscalls to work */
130	.align LINUX_GATEWAY_ADDR
131linux_gateway_entry:
132	gate	.+8, %r0			/* become privileged */
133	mtsp	%r0,%sr4			/* get kernel space into sr4 */
134	mtsp	%r0,%sr5			/* get kernel space into sr5 */
135	mtsp	%r0,%sr6			/* get kernel space into sr6 */
136
137#ifdef CONFIG_64BIT
138	/* Store W bit on entry to the syscall in case it's a wide userland
139	 * process. */
140	ssm	PSW_SM_W, %r1
141	extrd,u	%r1,PSW_W_BIT,1,%r1
142	/* sp must be aligned on 4, so deposit the W bit setting into
143	 * the bottom of sp temporarily */
144	or,ev	%r1,%r30,%r30
145	b,n	1f
146	/* The top halves of argument registers must be cleared on syscall
147	 * entry from narrow executable.
148	 */
149	depdi	0, 31, 32, %r26
150	depdi	0, 31, 32, %r25
151	depdi	0, 31, 32, %r24
152	depdi	0, 31, 32, %r23
153	depdi	0, 31, 32, %r22
154	depdi	0, 31, 32, %r21
1551:
156#endif
157
158	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
159	 * by external interrupts.
160	 */
161	mfsp    %sr7,%r1                        /* save user sr7 */
162	rsm	PSW_SM_I, %r0			/* disable interrupts */
163	mtsp    %r1,%sr3                        /* and store it in sr3 */
164
165	mfctl   %cr30,%r1
166	xor     %r1,%r30,%r30                   /* ye olde xor trick */
167	xor     %r1,%r30,%r1
168	xor     %r1,%r30,%r30
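	/* The three xors above swap the two registers without a scratch
	 * register: %r1 now holds the user sp and %r30 the task pointer. */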
169
170	LDREG	TASK_STACK(%r30),%r30		/* set up kernel stack */
171	ldo	FRAME_SIZE(%r30),%r30
172	/* N.B.: It is critical that we don't set sr7 to 0 until r30
173	 *       contains a valid kernel stack pointer. It is also
174	 *       critical that we don't start using the kernel stack
175	 *       until after sr7 has been set to 0.
176	 */
177
178	mtsp	%r0,%sr7			/* get kernel space into sr7 */
179	ssm	PSW_SM_I, %r0			/* enable interrupts */
180	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
181	mfctl	%cr30,%r1			/* get task ptr in %r1 */
182
183	/* Save some registers for sigcontext and potential task
184	   switch (see entry.S for the details of which ones are
185	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
186	   a process is on a syscall or not.  For an interrupt the real
187	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
188	STREG	%r0,  TASK_PT_PSW(%r1)
189	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
190	STREG	%r19, TASK_PT_GR19(%r1)
191
192	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
193#ifdef CONFIG_64BIT
194	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
195#if 0
196	xor	%r19,%r2,%r2			/* clear bottom bit */
197	depd,z	%r19,1,1,%r19
198	std	%r19,TASK_PT_PSW(%r1)
199#endif
200#endif
201	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */
202
203	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
204	STREG	%r21, TASK_PT_GR21(%r1)
205	STREG	%r22, TASK_PT_GR22(%r1)
206	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
207	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
208	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
209	STREG	%r26, TASK_PT_GR26(%r1)	 	/* 1st argument */
210	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
211	STREG   %r28, TASK_PT_GR28(%r1)         /* return value 0 */
212	STREG   %r0, TASK_PT_ORIG_R28(%r1)      /* don't prohibit restarts */
213	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
214	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */
215
216	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
217	save_fp	%r27				/* or potential task switch  */
218
219	mfctl	%cr11, %r27			/* i.e. SAR */
220	STREG	%r27, TASK_PT_SAR(%r1)
221
222	loadgp
223
224#ifdef CONFIG_64BIT
225	ldo	-16(%r30),%r29			/* Reference param save area */
226	copy	%r19,%r2			/* W bit back to r2 */
227#else
	/* In wide mode the first eight args are passed in registers, so nothing
	 * needs to go on the stack here; in narrow mode the 5th and 6th
	 * arguments are passed on the stack below. */
230	stw     %r22, -52(%r30)                 /* 5th argument */
231	stw     %r21, -56(%r30)                 /* 6th argument */
232#endif
233
234	/* Are we being ptraced? */
235	mfctl	%cr30, %r1
236	LDREG	TASK_TI_FLAGS(%r1),%r1
237	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
238	and,COND(=) %r1, %r19, %r0
239	b,n	.Ltracesys
240
241	/* Note!  We cannot use the syscall table that is mapped
242	nearby since the gateway page is mapped execute-only. */
243
244#ifdef CONFIG_64BIT
245	ldil	L%sys_call_table, %r1
246	or,ev	%r2,%r2,%r2
247	ldil	L%sys_call_table64, %r1
248	ldo	R%sys_call_table(%r1), %r19
249	or,ev	%r2,%r2,%r2
250	ldo	R%sys_call_table64(%r1), %r19
251#else
252	load32	sys_call_table, %r19
253#endif
254	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
255	b,n	.Lsyscall_nosys
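	/* comiclr,>> nullifies the branch when %r20 is a valid number, i.e.
	 * roughly: if (nr >= __NR_Linux_syscalls) goto syscall_nosys; */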
256
257	LDREGX  %r20(%r19), %r19
258
259	/* If this is a sys_rt_sigreturn call, and the signal was received
260	 * when not in_syscall, then we want to return via syscall_exit_rfi,
261	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
262	 * trampoline code in signal.c).
263	 */
264	ldi	__NR_rt_sigreturn,%r2
265	comb,=	%r2,%r20,.Lrt_sigreturn
266.Lin_syscall:
267	ldil	L%syscall_exit,%r2
268	be      0(%sr7,%r19)
269	ldo	R%syscall_exit(%r2),%r2
270.Lrt_sigreturn:
271	comib,<> 0,%r25,.Lin_syscall
272	ldil	L%syscall_exit_rfi,%r2
273	be      0(%sr7,%r19)
274	ldo	R%syscall_exit_rfi(%r2),%r2
275
276	/* Note!  Because we are not running where we were linked, any
277	calls to functions external to this file must be indirect.  To
278	be safe, we apply the opposite rule to functions within this
279	file, with local labels given to them to ensure correctness. */
280
281.Lsyscall_nosys:
282syscall_nosys:
283	ldil	L%syscall_exit,%r1
284	be	R%syscall_exit(%sr7,%r1)
285	ldo	-ENOSYS(%r0),%r28		   /* set errno */
286
287
288/* Warning! This trace code is a virtual duplicate of the code above so be
289 * sure to maintain both! */
290.Ltracesys:
291tracesys:
292	/* Need to save more registers so the debugger can see where we
293	 * are.  This saves only the lower 8 bits of PSW, so that the C
294	 * bit is still clear on syscalls, and the D bit is set if this
295	 * full register save path has been executed.  We check the D
296	 * bit on syscall_return_rfi to determine which registers to
297	 * restore.  An interrupt results in a full PSW saved with the
	 * C bit set; a syscall entry that is not being traced results in C and D clear
299	 * in the saved PSW.
300	 */
301	mfctl	%cr30,%r1			/* get task ptr */
302	ssm	0,%r2
303	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
304	mfsp	%sr0,%r2
305	STREG	%r2,TASK_PT_SR0(%r1)
306	mfsp	%sr1,%r2
307	STREG	%r2,TASK_PT_SR1(%r1)
308	mfsp	%sr2,%r2
309	STREG	%r2,TASK_PT_SR2(%r1)
310	mfsp	%sr3,%r2
311	STREG	%r2,TASK_PT_SR3(%r1)
312	STREG	%r2,TASK_PT_SR4(%r1)
313	STREG	%r2,TASK_PT_SR5(%r1)
314	STREG	%r2,TASK_PT_SR6(%r1)
315	STREG	%r2,TASK_PT_SR7(%r1)
316	STREG	%r2,TASK_PT_IASQ0(%r1)
317	STREG	%r2,TASK_PT_IASQ1(%r1)
318	LDREG	TASK_PT_GR31(%r1),%r2
319	STREG	%r2,TASK_PT_IAOQ0(%r1)
320	ldo	4(%r2),%r2
321	STREG	%r2,TASK_PT_IAOQ1(%r1)
322	ldo	TASK_REGS(%r1),%r2
323	/* reg_save %r2 */
324	STREG	%r3,PT_GR3(%r2)
325	STREG	%r4,PT_GR4(%r2)
326	STREG	%r5,PT_GR5(%r2)
327	STREG	%r6,PT_GR6(%r2)
328	STREG	%r7,PT_GR7(%r2)
329	STREG	%r8,PT_GR8(%r2)
330	STREG	%r9,PT_GR9(%r2)
331	STREG	%r10,PT_GR10(%r2)
332	STREG	%r11,PT_GR11(%r2)
333	STREG	%r12,PT_GR12(%r2)
334	STREG	%r13,PT_GR13(%r2)
335	STREG	%r14,PT_GR14(%r2)
336	STREG	%r15,PT_GR15(%r2)
337	STREG	%r16,PT_GR16(%r2)
338	STREG	%r17,PT_GR17(%r2)
339	STREG	%r18,PT_GR18(%r2)
340	/* Finished saving things for the debugger */
341
342	copy	%r2,%r26
343	ldil	L%do_syscall_trace_enter,%r1
344	ldil	L%tracesys_next,%r2
345	be	R%do_syscall_trace_enter(%sr7,%r1)
346	ldo	R%tracesys_next(%r2),%r2
347
348tracesys_next:
349	/* do_syscall_trace_enter either returned the syscallno, or -1L,
350	 *  so we skip restoring the PT_GR20 below, since we pulled it from
351	 *  task->thread.regs.gr[20] above.
352	 */
353	copy	%ret0,%r20
354
355	mfctl	%cr30,%r1			/* get task ptr */
356	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
357	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
358	LDREG   TASK_PT_GR25(%r1), %r25
359	LDREG   TASK_PT_GR24(%r1), %r24
360	LDREG   TASK_PT_GR23(%r1), %r23
361	LDREG   TASK_PT_GR22(%r1), %r22
362	LDREG   TASK_PT_GR21(%r1), %r21
363#ifdef CONFIG_64BIT
364	ldo	-16(%r30),%r29			/* Reference param save area */
365#else
366	stw     %r22, -52(%r30)                 /* 5th argument */
367	stw     %r21, -56(%r30)                 /* 6th argument */
368#endif
369
370	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
371	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
372	b,n	.Ltracesys_nosys
373
374	/* Note!  We cannot use the syscall table that is mapped
375	nearby since the gateway page is mapped execute-only. */
376
377#ifdef CONFIG_64BIT
378	LDREG	TASK_PT_GR30(%r1), %r19		/* get users sp back */
379	extrd,u	%r19,63,1,%r2			/* W hidden in bottom bit */
380
381	ldil	L%sys_call_table, %r1
382	or,ev	%r2,%r2,%r2
383	ldil	L%sys_call_table64, %r1
384	ldo	R%sys_call_table(%r1), %r19
385	or,ev	%r2,%r2,%r2
386	ldo	R%sys_call_table64(%r1), %r19
387#else
388	load32	sys_call_table, %r19
389#endif
390
391	LDREGX  %r20(%r19), %r19
392
393	/* If this is a sys_rt_sigreturn call, and the signal was received
394	 * when not in_syscall, then we want to return via syscall_exit_rfi,
395	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
396	 * trampoline code in signal.c).
397	 */
398	ldi	__NR_rt_sigreturn,%r2
399	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
400.Ltrace_in_syscall:
401	ldil	L%tracesys_exit,%r2
402	be      0(%sr7,%r19)
403	ldo	R%tracesys_exit(%r2),%r2
404
405.Ltracesys_nosys:
406	ldo	-ENOSYS(%r0),%r28		/* set errno */
407
408	/* Do *not* call this function on the gateway page, because it
409	makes a direct call to syscall_trace. */
410
411tracesys_exit:
412	mfctl	%cr30,%r1			/* get task ptr */
413#ifdef CONFIG_64BIT
414	ldo	-16(%r30),%r29			/* Reference param save area */
415#endif
416	ldo	TASK_REGS(%r1),%r26
417	BL	do_syscall_trace_exit,%r2
418	STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
419	mfctl	%cr30,%r1			/* get task ptr */
420	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return val. */
421
422	ldil	L%syscall_exit,%r1
423	be,n	R%syscall_exit(%sr7,%r1)
424
425.Ltrace_rt_sigreturn:
426	comib,<> 0,%r25,.Ltrace_in_syscall
427	ldil	L%tracesys_sigexit,%r2
428	be      0(%sr7,%r19)
429	ldo	R%tracesys_sigexit(%r2),%r2
430
431tracesys_sigexit:
432	mfctl	%cr30,%r1			/* get task ptr */
433#ifdef CONFIG_64BIT
434	ldo	-16(%r30),%r29			/* Reference param save area */
435#endif
436	BL	do_syscall_trace_exit,%r2
437	ldo	TASK_REGS(%r1),%r26
438
439	ldil	L%syscall_exit_rfi,%r1
440	be,n	R%syscall_exit_rfi(%sr7,%r1)
441
442
443	/*********************************************************
444		32/64-bit Light-Weight-Syscall ABI
445
446		* - Indicates a hint for userspace inline asm
447		implementations.
448
449		Syscall number (caller-saves)
450	        - %r20
451	        * In asm clobber.
452
453		Argument registers (caller-saves)
454	        - %r26, %r25, %r24, %r23, %r22
455	        * In asm input.
456
457		Return registers (caller-saves)
458	        - %r28 (return), %r21 (errno)
459	        * In asm output.
460
461		Caller-saves registers
462	        - %r1, %r27, %r29
463	        - %r2 (return pointer)
464	        - %r31 (ble link register)
465	        * In asm clobber.
466
467		Callee-saves registers
468	        - %r3-%r18
469	        - %r30 (stack pointer)
470	        * Not in asm clobber.
471
472		If userspace is 32-bit:
473		Callee-saves registers
474	        - %r19 (32-bit PIC register)
475
476		Differences from 32-bit calling convention:
477		- Syscall number in %r20
478		- Additional argument register %r22 (arg4)
479		- Callee-saves %r19.
480
481		If userspace is 64-bit:
482		Callee-saves registers
483		- %r27 (64-bit PIC register)
484
485		Differences from 64-bit calling convention:
486		- Syscall number in %r20
487		- Additional argument register %r22 (arg4)
488		- Callee-saves %r27.
489
490		Error codes returned by entry path:
491
492		ENOSYS - r20 was an invalid LWS number.
493
494	*********************************************************/
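	/* A rough sketch (not part of this file) of the calling side for the
	 * ABI above, invoking the 32-bit CAS entry (slot 0 of lws_table below).
	 * Register bindings follow the table: address/old/new in %r26/%r25/%r24,
	 * LWS number in %r20, previous memory value back in %r28, error in %r21.
	 *
	 *	static inline int
	 *	lws_cas32(volatile unsigned int *mem, unsigned int oldval,
	 *		  unsigned int newval)
	 *	{
	 *		register unsigned long a asm("r26") = (unsigned long)mem;
	 *		register unsigned long o asm("r25") = oldval;
	 *		register unsigned long n asm("r24") = newval;
	 *		register unsigned long prev asm("r28");
	 *		register long err asm("r21");
	 *
	 *		asm volatile("ble 0xb0(%%sr2, %%r0)\n\t"
	 *			     "ldi 0, %%r20"	// LWS entry 0 in delay slot
	 *			     : "=r" (prev), "=r" (err),
	 *			       "+r" (a), "+r" (o), "+r" (n)
	 *			     :
	 *			     : "r1", "r2", "r20", "r22", "r23", "r27",
	 *			       "r29", "r31", "memory");
	 *		// -EAGAIN in err means "retry", -EFAULT means bad address.
	 *		return err == 0 && prev == oldval;
	 *	}
	 */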
495lws_start:
496
497#ifdef CONFIG_64BIT
498	ssm	PSW_SM_W, %r1
499	extrd,u	%r1,PSW_W_BIT,1,%r1
500	/* sp must be aligned on 4, so deposit the W bit setting into
501	 * the bottom of sp temporarily */
502	or,od	%r1,%r30,%r30
503
504	/* Clip LWS number to a 32-bit value for 32-bit processes */
505	depdi	0, 31, 32, %r20
506#endif
507
508        /* Is the lws entry number valid? */
509	comiclr,>>	__NR_lws_entries, %r20, %r0
510	b,n	lws_exit_nosys
511
512	/* Load table start */
513	ldil	L%lws_table, %r1
514	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
515	LDREGX	%r20(%sr2,r28), %r21	/* Scratch use of r21 */
516
517	/* Jump to lws, lws table pointers already relocated */
518	be,n	0(%sr2,%r21)
519
520lws_exit_noerror:
521	lws_pagefault_enable	%r1,%r21
522	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
523	stw,ma	%r21, 0(%sr2,%r20)
524	ssm	PSW_SM_I, %r0
525	b	lws_exit
526	copy	%r0, %r21
527
528lws_wouldblock:
529	ssm	PSW_SM_I, %r0
530	ldo	2(%r0), %r28
531	b	lws_exit
532	ldo	-EAGAIN(%r0), %r21
533
534lws_pagefault:
535	lws_pagefault_enable	%r1,%r21
536	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
537	stw,ma	%r21, 0(%sr2,%r20)
538	ssm	PSW_SM_I, %r0
539	ldo	3(%r0),%r28
540	b	lws_exit
541	ldo	-EAGAIN(%r0),%r21
542
543lws_fault:
544	ldo	1(%r0),%r28
545	b	lws_exit
546	ldo	-EFAULT(%r0),%r21
547
548lws_exit_nosys:
549	ldo	-ENOSYS(%r0),%r21
550	/* Fall through: Return to userspace */
551
552lws_exit:
553#ifdef CONFIG_64BIT
554	/* decide whether to reset the wide mode bit
555	 *
556	 * For a syscall, the W bit is stored in the lowest bit
557	 * of sp.  Extract it and reset W if it is zero */
558	extrd,u,*<>	%r30,63,1,%r1
559	rsm	PSW_SM_W, %r0
560	/* now reset the lowest bit of sp if it was set */
561	xor	%r30,%r1,%r30
562#endif
563	be,n	0(%sr7, %r31)
564
565
566
567	/***************************************************
568		Implementing 32bit CAS as an atomic operation:
569
570		%r26 - Address to examine
571		%r25 - Old value to check (old)
572		%r24 - New value to set (new)
573		%r28 - Return prev through this register.
574		%r21 - Kernel error code
575
576		%r21 returns the following error codes:
577		EAGAIN - CAS is busy, ldcw failed, try again.
578		EFAULT - Read or write failed.
579
580		If EAGAIN is returned, %r28 indicates the busy reason:
581		r28 == 1 - CAS is busy. lock contended.
582		r28 == 2 - CAS is busy. ldcw failed.
583		r28 == 3 - CAS is busy. page fault.
584
585		Scratch: r20, r28, r1
586
587	****************************************************/
588
589	/* ELF64 Process entry path */
590lws_compare_and_swap64:
591#ifdef CONFIG_64BIT
592	b,n	lws_compare_and_swap
593#else
594	/* If we are not a 64-bit kernel, then we don't
595	 * have 64-bit input registers, and calling
596	 * the 64-bit LWS CAS returns ENOSYS.
597	 */
598	b,n	lws_exit_nosys
599#endif
600
601	/* ELF32/ELF64 Process entry path */
602lws_compare_and_swap32:
603#ifdef CONFIG_64BIT
604	/* Wide mode user process? */
605	bb,<,n  %sp, 31, lws_compare_and_swap
606
607	/* Clip all the input registers for 32-bit processes */
608	depdi	0, 31, 32, %r26
609	depdi	0, 31, 32, %r25
610	depdi	0, 31, 32, %r24
611#endif
612
613lws_compare_and_swap:
614	/* Trigger memory reference interruptions without writing to memory */
6151:	ldw	0(%r26), %r28
616	proberi	(%r26), PRIV_USER, %r28
617	comb,=,n	%r28, %r0, lws_fault /* backwards, likely not taken */
618	nop
6192:	stbys,e	%r0, 0(%r26)
620
621	/* Calculate 8-bit hash index from virtual address */
622	extru_safe	%r26, 27, 8, %r20
623
624	/* Load start of lock table */
625	ldil	L%lws_lock_start, %r28
626	ldo	R%lws_lock_start(%r28), %r28
627
628	/* Find lock to use, the hash index is one of 0 to
629	   255, multiplied by 16 (keep it 16-byte aligned)
630	   and add to the lock table offset. */
631	shlw	%r20, 4, %r20
632	add	%r20, %r28, %r20
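	/* The extract/shift/add above pick one of 256 16-byte lock slots,
	 * roughly:
	 *   lock = lws_lock_start + (((unsigned long)addr >> 4) & 0xff) * 16;
	 */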
633
634	rsm	PSW_SM_I, %r0				/* Disable interrupts */
635
636	/* Try to acquire the lock */
637	LDCW	0(%sr2,%r20), %r28
638	spinlock_check	%r28, %r21
639	comclr,<>	%r0, %r28, %r0
640	b,n	lws_wouldblock
641
642	/* Disable page faults to prevent sleeping in critical region */
643	lws_pagefault_disable	%r21,%r28
644
645	/*
646		prev = *addr;
647		if ( prev == old )
648		  *addr = new;
649		return prev;
650	*/
651
652	/* NOTES:
653		This all works because intr_do_signal
654		and schedule both check the return iasq
655		and see that we are on the kernel page
656		so this process is never scheduled off
657		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
659		perspective
660	*/
661	/* The load and store could fail */
6623:	ldw	0(%r26), %r28
663	sub,<>	%r28, %r25, %r0
6644:	stw	%r24, 0(%r26)
665	b,n	lws_exit_noerror
666
667	/* A fault occurred on load or stbys,e store */
6685:	b,n	lws_fault
669	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page)
670	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page)
671
672	/* A page fault occurred in critical region */
6736:	b,n	lws_pagefault
674	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page)
675	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page)
676
677
678	/***************************************************
679		New CAS implementation which uses pointers and variable size
680		information. The value pointed by old and new MUST NOT change
681		while performing CAS. The lock only protects the value at %r26.
682
683		%r26 - Address to examine
684		%r25 - Pointer to the value to check (old)
685		%r24 - Pointer to the value to set (new)
686		%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
687		%r28 - Return non-zero on failure
688		%r21 - Kernel error code
689
690		%r21 returns the following error codes:
691		EAGAIN - CAS is busy, ldcw failed, try again.
692		EFAULT - Read or write failed.
693
694		If EAGAIN is returned, %r28 indicates the busy reason:
695		r28 == 1 - CAS is busy. lock contended.
696		r28 == 2 - CAS is busy. ldcw failed.
697		r28 == 3 - CAS is busy. page fault.
698
699		Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
700
701	****************************************************/
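	/* Rough C semantics of this entry for the 32-bit size code (a sketch;
	 * the actual access width follows %r23):
	 *
	 *	if (*(u32 *)addr == *(u32 *)old) {	// old/new read before the lock is taken
	 *		*(u32 *)addr = *(u32 *)new;
	 *		return 0;			// %r28 == 0: success
	 *	}
	 *	return 1;				// %r28 != 0: compare failed
	 */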
702
703lws_compare_and_swap_2:
704#ifdef CONFIG_64BIT
705	/* Wide mode user process? */
706	bb,<,n	%sp, 31, cas2_begin
707
708	/* Clip the input registers for 32-bit processes. We don't
709	   need to clip %r23 as we only use it for word operations */
710	depdi	0, 31, 32, %r26
711	depdi	0, 31, 32, %r25
712	depdi	0, 31, 32, %r24
713#endif
714
715cas2_begin:
	/* Check the validity of the size argument */
717	subi,>>= 3, %r23, %r0
718	b,n	lws_exit_nosys
719
720	/* Jump to the functions which will load the old and new values into
	   registers depending on their size */
722	shlw	%r23, 2, %r29
723	blr	%r29, %r0
724	nop
725
726	/* 8-bit load */
7271:	ldb	0(%r25), %r25
728	b	cas2_lock_start
7292:	ldb	0(%r24), %r24
730	nop
731	nop
732	nop
733	nop
734	nop
735
736	/* 16-bit load */
7373:	ldh	0(%r25), %r25
738	b	cas2_lock_start
7394:	ldh	0(%r24), %r24
740	nop
741	nop
742	nop
743	nop
744	nop
745
746	/* 32-bit load */
7475:	ldw	0(%r25), %r25
748	b	cas2_lock_start
7496:	ldw	0(%r24), %r24
750	nop
751	nop
752	nop
753	nop
754	nop
755
756	/* 64-bit load */
757#ifdef CONFIG_64BIT
7587:	ldd	0(%r25), %r25
7598:	ldd	0(%r24), %r24
760#else
761	/* Load old value into r22/r23 - high/low */
7627:	ldw	0(%r25), %r22
7638:	ldw	4(%r25), %r23
764	/* Load new value into fr4 for atomic store later */
7659:	flddx	0(%r24), %fr4
766#endif
767
768cas2_lock_start:
769	/* Trigger memory reference interruptions without writing to memory */
770	copy	%r26, %r28
771	depi_safe	0, 31, 2, %r28
77210:	ldw	0(%r28), %r1
773	proberi	(%r28), PRIV_USER, %r1
774	comb,=,n	%r1, %r0, lws_fault /* backwards, likely not taken */
775	nop
77611:	stbys,e	%r0, 0(%r28)
777
778	/* Calculate 8-bit hash index from virtual address */
779	extru_safe	%r26, 27, 8, %r20
780
781	/* Load start of lock table */
782	ldil	L%lws_lock_start, %r28
783	ldo	R%lws_lock_start(%r28), %r28
784
785	/* Find lock to use, the hash index is one of 0 to
786	   255, multiplied by 16 (keep it 16-byte aligned)
787	   and add to the lock table offset. */
788	shlw	%r20, 4, %r20
789	add	%r20, %r28, %r20
790
791	rsm	PSW_SM_I, %r0			/* Disable interrupts */
792
793	/* Try to acquire the lock */
794	LDCW	0(%sr2,%r20), %r28
795	spinlock_check	%r28, %r21
796	comclr,<>	%r0, %r28, %r0
797	b,n	lws_wouldblock
798
799	/* Disable page faults to prevent sleeping in critical region */
800	lws_pagefault_disable	%r21,%r28
801
802	/*
803		prev = *addr;
804		if ( prev == old )
805		  *addr = new;
806		return prev;
807	*/
808
809	/* NOTES:
810		This all works because intr_do_signal
811		and schedule both check the return iasq
812		and see that we are on the kernel page
813		so this process is never scheduled off
814		or is ever sent any signal of any sort,
		thus it is wholly atomic from userspace's
816		perspective
817	*/
818
819	/* Jump to the correct function */
820	blr	%r29, %r0
821	/* Set %r28 as non-zero for now */
822	ldo	1(%r0),%r28
823
824	/* 8-bit CAS */
82512:	ldb	0(%r26), %r29
826	sub,=	%r29, %r25, %r0
827	b,n	lws_exit_noerror
82813:	stb	%r24, 0(%r26)
829	b	lws_exit_noerror
830	copy	%r0, %r28
831	nop
832	nop
833
834	/* 16-bit CAS */
83514:	ldh	0(%r26), %r29
836	sub,=	%r29, %r25, %r0
837	b,n	lws_exit_noerror
83815:	sth	%r24, 0(%r26)
839	b	lws_exit_noerror
840	copy	%r0, %r28
841	nop
842	nop
843
844	/* 32-bit CAS */
84516:	ldw	0(%r26), %r29
846	sub,=	%r29, %r25, %r0
847	b,n	lws_exit_noerror
84817:	stw	%r24, 0(%r26)
849	b	lws_exit_noerror
850	copy	%r0, %r28
851	nop
852	nop
853
854	/* 64-bit CAS */
855#ifdef CONFIG_64BIT
85618:	ldd	0(%r26), %r29
857	sub,*=	%r29, %r25, %r0
858	b,n	lws_exit_noerror
85919:	std	%r24, 0(%r26)
860	copy	%r0, %r28
861#else
862	/* Compare first word */
86318:	ldw	0(%r26), %r29
864	sub,=	%r29, %r22, %r0
865	b,n	lws_exit_noerror
866	/* Compare second word */
86719:	ldw	4(%r26), %r29
868	sub,=	%r29, %r23, %r0
869	b,n	lws_exit_noerror
870	/* Perform the store */
87120:	fstdx	%fr4, 0(%r26)
872	copy	%r0, %r28
873#endif
874	b	lws_exit_noerror
875	copy	%r0, %r28
876
877	/* A fault occurred on load or stbys,e store */
87830:	b,n	lws_fault
879	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
880	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
881	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
882	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
883	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
884	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
885	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
886	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
887#ifndef CONFIG_64BIT
888	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
889#endif
890
891	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
892	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
893
894	/* A page fault occurred in critical region */
89531:	b,n	lws_pagefault
896	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
897	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
898	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
899	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
900	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
901	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
902	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
903	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
904#ifndef CONFIG_64BIT
905	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
906#endif
907
908
909	/***************************************************
910		LWS atomic exchange.
911
912		%r26 - Exchange address
913		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
914		%r24 - Address of new value
915		%r23 - Address of old value
916		%r28 - Return non-zero on failure
917		%r21 - Kernel error code
918
919		%r21 returns the following error codes:
920		EAGAIN - CAS is busy, ldcw failed, try again.
921		EFAULT - Read or write failed.
922
923		If EAGAIN is returned, %r28 indicates the busy reason:
924		r28 == 1 - CAS is busy. lock contended.
925		r28 == 2 - CAS is busy. ldcw failed.
926		r28 == 3 - CAS is busy. page fault.
927
928		Scratch: r20, r1
929
930	****************************************************/
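	/* Rough C semantics of this entry for the 32-bit size code (a sketch;
	 * the actual access width follows %r25):
	 *
	 *	*(u32 *)old_addr = *(u32 *)addr;	// hand back the previous value
	 *	*(u32 *)addr     = *(u32 *)new_addr;	// store the new one
	 *	return 0;				// %r28 == 0 on success
	 */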
931
932lws_atomic_xchg:
933#ifdef CONFIG_64BIT
934	/* Wide mode user process? */
935	bb,<,n	%sp, 31, atomic_xchg_begin
936
	/* Clip all the input registers for 32-bit processes */
939	depdi	0, 31, 32, %r26
940	depdi	0, 31, 32, %r25
941	depdi	0, 31, 32, %r24
942	depdi	0, 31, 32, %r23
943#endif
944
945atomic_xchg_begin:
	/* Check the validity of the size argument */
947	subi,>>= 3, %r25, %r0
948	b,n	lws_exit_nosys
949
	/* Jump to the routines which probe the new-value and old-value
	   addresses, depending on their size */
952	shlw	%r25, 2, %r1
953	blr	%r1, %r0
954	nop
955
956	/* Perform exception checks */
957
958	/* 8-bit exchange */
9591:	ldb	0(%r24), %r20
960	proberi	(%r24), PRIV_USER, %r20
961	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
962	nop
963	copy	%r23, %r20
964	depi_safe	0, 31, 2, %r20
965	b	atomic_xchg_start
9662:	stbys,e	%r0, 0(%r20)
967
968	/* 16-bit exchange */
9693:	ldh	0(%r24), %r20
970	proberi	(%r24), PRIV_USER, %r20
971	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
972	nop
973	copy	%r23, %r20
974	depi_safe	0, 31, 2, %r20
975	b	atomic_xchg_start
9764:	stbys,e	%r0, 0(%r20)
977
978	/* 32-bit exchange */
9795:	ldw	0(%r24), %r20
980	proberi	(%r24), PRIV_USER, %r20
981	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
982	nop
983	b	atomic_xchg_start
9846:	stbys,e	%r0, 0(%r23)
985	nop
986	nop
987
988	/* 64-bit exchange */
989#ifdef CONFIG_64BIT
9907:	ldd	0(%r24), %r20
991	proberi	(%r24), PRIV_USER, %r20
992	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
993	nop
9948:	stdby,e	%r0, 0(%r23)
995#else
9967:	ldw	0(%r24), %r20
9978:	ldw	4(%r24), %r20
998	proberi	(%r24), PRIV_USER, %r20
999	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
1000	nop
1001	copy	%r23, %r20
1002	depi_safe	0, 31, 2, %r20
10039:	stbys,e	%r0, 0(%r20)
100410:	stbys,e	%r0, 4(%r20)
1005#endif
1006
1007atomic_xchg_start:
1008	/* Trigger memory reference interruptions without writing to memory */
1009	copy	%r26, %r28
1010	depi_safe	0, 31, 2, %r28
101111:	ldw	0(%r28), %r1
101212:	stbys,e	%r0, 0(%r28)
1013
1014	/* Calculate 8-bit hash index from virtual address */
1015	extru_safe  %r26, 27, 8, %r20
1016
1017	/* Load start of lock table */
1018	ldil	L%lws_lock_start, %r28
1019	ldo	R%lws_lock_start(%r28), %r28
1020
1021	/* Find lock to use, the hash index is one of 0 to
1022	   255, multiplied by 16 (keep it 16-byte aligned)
1023	   and add to the lock table offset. */
1024	shlw	%r20, 4, %r20
1025	add	%r20, %r28, %r20
1026
1027	rsm	PSW_SM_I, %r0			/* Disable interrupts */
1028
1029	/* Try to acquire the lock */
1030	LDCW	0(%sr2,%r20), %r28
1031	spinlock_check	%r28, %r21
1032	comclr,<>	%r0, %r28, %r0
1033	b,n	lws_wouldblock
1034
1035	/* Disable page faults to prevent sleeping in critical region */
1036	lws_pagefault_disable	%r21,%r28
1037
1038	/* NOTES:
1039		This all works because intr_do_signal
1040		and schedule both check the return iasq
1041		and see that we are on the kernel page
1042		so this process is never scheduled off
1043		or is ever sent any signal of any sort,
1044		thus it is wholly atomic from userspace's
1045		perspective
1046	*/
1047
1048	/* Jump to the correct function */
1049	blr	%r1, %r0
1050	/* Set %r28 as non-zero for now */
1051	ldo	1(%r0),%r28
1052
1053	/* 8-bit exchange */
14:	ldb	0(%r26), %r1
15:	stb	%r1, 0(%r23)
16:	ldb	0(%r24), %r1
17:	stb	%r1, 0(%r26)
1058	b	lws_exit_noerror
1059	copy	%r0, %r28
1060	nop
1061	nop
1062
1063	/* 16-bit exchange */
106418:	ldh	0(%r26), %r1
106519:	sth	%r1, 0(%r23)
106620:	ldh	0(%r24), %r1
106721:	sth	%r1, 0(%r26)
1068	b	lws_exit_noerror
1069	copy	%r0, %r28
1070	nop
1071	nop
1072
1073	/* 32-bit exchange */
107422:	ldw	0(%r26), %r1
107523:	stw	%r1, 0(%r23)
107624:	ldw	0(%r24), %r1
107725:	stw	%r1, 0(%r26)
1078	b	lws_exit_noerror
1079	copy	%r0, %r28
1080	nop
1081	nop
1082
1083	/* 64-bit exchange */
1084#ifdef CONFIG_64BIT
108526:	ldd	0(%r26), %r1
108627:	std	%r1, 0(%r23)
108728:	ldd	0(%r24), %r1
108829:	std	%r1, 0(%r26)
1089#else
109026:	flddx	0(%r26), %fr4
109127:	fstdx	%fr4, 0(%r23)
109228:	flddx	0(%r24), %fr4
109329:	fstdx	%fr4, 0(%r26)
1094#endif
1095	b	lws_exit_noerror
1096	copy	%r0, %r28
1097
1098	/* A fault occurred on load or stbys,e store */
109930:	b,n	lws_fault
1100	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
1101	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
1102	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
1103	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
1104	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
1105	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
1106	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
1107	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
1108#ifndef CONFIG_64BIT
1109	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
1110	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
1111#endif
1112
1113	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
1114	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page)
1115
1116	/* A page fault occurred in critical region */
111731:	b,n	lws_pagefault
1118	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
1119	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
1120	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
1121	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
1122	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
1123	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
1124	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
1125	ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page)
1126	ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page)
1127	ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page)
1128	ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page)
1129	ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page)
1130	ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page)
1131	ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page)
1132	ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page)
1133	ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page)
1134
1135	/***************************************************
1136		LWS atomic store.
1137
1138		%r26 - Address to store
1139		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
1140		%r24 - Address of value to store
1141		%r28 - Return non-zero on failure
1142		%r21 - Kernel error code
1143
1144		%r21 returns the following error codes:
1145		EAGAIN - CAS is busy, ldcw failed, try again.
1146		EFAULT - Read or write failed.
1147
1148		If EAGAIN is returned, %r28 indicates the busy reason:
1149		r28 == 1 - CAS is busy. lock contended.
1150		r28 == 2 - CAS is busy. ldcw failed.
1151		r28 == 3 - CAS is busy. page fault.
1152
1153		Scratch: r20, r1
1154
1155	****************************************************/
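	/* Rough C semantics of this entry for the 32-bit size code (a sketch;
	 * the actual access width follows %r25):
	 *
	 *	*(u32 *)addr = *(u32 *)val_addr;	// one atomic user-visible store
	 *	return 0;				// %r28 == 0 on success
	 */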
1156
1157lws_atomic_store:
1158#ifdef CONFIG_64BIT
1159	/* Wide mode user process? */
1160	bb,<,n	%sp, 31, atomic_store_begin
1161
	/* Clip all the input registers for 32-bit processes */
1164	depdi	0, 31, 32, %r26
1165	depdi	0, 31, 32, %r25
1166	depdi	0, 31, 32, %r24
1167#endif
1168
1169atomic_store_begin:
	/* Check the validity of the size argument */
1171	subi,>>= 3, %r25, %r0
1172	b,n	lws_exit_nosys
1173
1174	shlw	%r25, 1, %r1
1175	blr	%r1, %r0
1176	nop
1177
1178	/* Perform exception checks */
1179
1180	/* 8-bit store */
11811:	ldb	0(%r24), %r20
1182	b,n	atomic_store_start
1183	nop
1184	nop
1185
1186	/* 16-bit store */
11872:	ldh	0(%r24), %r20
1188	b,n	atomic_store_start
1189	nop
1190	nop
1191
1192	/* 32-bit store */
11933:	ldw	0(%r24), %r20
1194	b,n	atomic_store_start
1195	nop
1196	nop
1197
1198	/* 64-bit store */
1199#ifdef CONFIG_64BIT
12004:	ldd	0(%r24), %r20
1201#else
12024:	ldw	0(%r24), %r20
12035:	ldw	4(%r24), %r20
1204#endif
1205
1206atomic_store_start:
1207	/* Trigger memory reference interruptions without writing to memory */
1208	copy	%r26, %r28
1209	depi_safe	0, 31, 2, %r28
12106:	ldw	0(%r28), %r1
12117:	stbys,e	%r0, 0(%r28)
1212
1213	/* Calculate 8-bit hash index from virtual address */
1214	extru_safe  %r26, 27, 8, %r20
1215
1216	/* Load start of lock table */
1217	ldil	L%lws_lock_start, %r28
1218	ldo	R%lws_lock_start(%r28), %r28
1219
1220	/* Find lock to use, the hash index is one of 0 to
1221	   255, multiplied by 16 (keep it 16-byte aligned)
1222	   and add to the lock table offset. */
1223	shlw	%r20, 4, %r20
1224	add	%r20, %r28, %r20
1225
1226	rsm	PSW_SM_I, %r0			/* Disable interrupts */
1227
1228	/* Try to acquire the lock */
1229	LDCW	0(%sr2,%r20), %r28
1230	spinlock_check	%r28, %r21
1231	comclr,<>	%r0, %r28, %r0
1232	b,n	lws_wouldblock
1233
1234	/* Disable page faults to prevent sleeping in critical region */
1235	lws_pagefault_disable	%r21,%r28
1236
1237	/* NOTES:
1238		This all works because intr_do_signal
1239		and schedule both check the return iasq
1240		and see that we are on the kernel page
1241		so this process is never scheduled off
1242		or is ever sent any signal of any sort,
1243		thus it is wholly atomic from userspace's
1244		perspective
1245	*/
1246
1247	/* Jump to the correct function */
1248	blr	%r1, %r0
1249	/* Set %r28 as non-zero for now */
1250	ldo	1(%r0),%r28
1251
1252	/* 8-bit store */
12539:	ldb	0(%r24), %r1
125410:	stb	%r1, 0(%r26)
1255	b	lws_exit_noerror
1256	copy	%r0, %r28
1257
1258	/* 16-bit store */
125911:	ldh	0(%r24), %r1
126012:	sth	%r1, 0(%r26)
1261	b	lws_exit_noerror
1262	copy	%r0, %r28
1263
1264	/* 32-bit store */
126513:	ldw	0(%r24), %r1
126614:	stw	%r1, 0(%r26)
1267	b	lws_exit_noerror
1268	copy	%r0, %r28
1269
1270	/* 64-bit store */
1271#ifdef CONFIG_64BIT
127215:	ldd	0(%r24), %r1
127316:	std	%r1, 0(%r26)
1274#else
127515:	flddx	0(%r24), %fr4
127616:	fstdx	%fr4, 0(%r26)
1277#endif
1278	b	lws_exit_noerror
1279	copy	%r0, %r28
1280
1281	/* A fault occurred on load or stbys,e store */
128230:	b,n	lws_fault
1283	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
1284	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
1285	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
1286	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
1287#ifndef CONFIG_64BIT
1288	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
1289#endif
1290
1291	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
1292	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
1293
1294	/* A page fault occurred in critical region */
129531:	b,n	lws_pagefault
1296	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page)
1297	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page)
1298	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page)
1299	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
1300	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
1301	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
1302	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
1303	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
1304
1305	/* Make sure nothing else is placed on this page */
1306	.align PAGE_SIZE
1307END(linux_gateway_page)
1308ENTRY(end_linux_gateway_page)
1309
1310	/* Relocate symbols assuming linux_gateway_page is mapped
1311	   to virtual address 0x0 */
1312
1313#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
1314
1315	.section .rodata,"a"
1316
1317	.align 8
1318	/* Light-weight-syscall table */
1319	/* Start of lws table. */
1320ENTRY(lws_table)
1321	LWS_ENTRY(compare_and_swap32)		/* 0 - ELF32 Atomic 32bit CAS */
1322	LWS_ENTRY(compare_and_swap64)		/* 1 - ELF64 Atomic 32bit CAS */
1323	LWS_ENTRY(compare_and_swap_2)		/* 2 - Atomic 64bit CAS */
1324	LWS_ENTRY(atomic_xchg)			/* 3 - Atomic Exchange */
1325	LWS_ENTRY(atomic_store)			/* 4 - Atomic Store */
1326END(lws_table)
1327	/* End of lws table */
1328
1329#ifdef CONFIG_64BIT
1330#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
1331#else
1332#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
1333#endif
1334#define __SYSCALL(nr, entry)	ASM_ULONG_INSN entry
1335	.align 8
1336ENTRY(sys_call_table)
1337	.export sys_call_table,data
1338#include <asm/syscall_table_32.h>    /* 32-bit syscalls */
1339END(sys_call_table)
1340
1341#ifdef CONFIG_64BIT
1342#undef __SYSCALL_WITH_COMPAT
1343#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
1344	.align 8
1345ENTRY(sys_call_table64)
1346#include <asm/syscall_table_64.h>    /* 64-bit syscalls */
1347END(sys_call_table64)
1348#endif
1349
1350	/*
1351		All light-weight-syscall atomic operations
1352		will use this set of locks
1353
1354		NOTE: The lws_lock_start symbol must be
1355		at least 16-byte aligned for safe use
1356		with ldcw.
1357	*/
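	/* Equivalent layout of the array below, sketched in C:
	 *
	 *	struct {
	 *		volatile unsigned int lock;	// __ARCH_SPIN_LOCK_UNLOCKED_VAL
	 *		unsigned int pad[3];		// keep each slot 16 bytes for ldcw
	 *	} lws_lock_start[256];
	 */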
1358	.section .data
1359	.align	L1_CACHE_BYTES
1360ENTRY(lws_lock_start)
1361	/* lws locks */
1362	.rept 256
1363	/* Keep locks aligned at 16-bytes */
1364	.word __ARCH_SPIN_LOCK_UNLOCKED_VAL
1365	.word 0
1366	.word 0
1367	.word 0
1368	.endr
1369END(lws_lock_start)
1370	.previous
1371
1372.end
1373