/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_SWITCH_TO_64_H
#define __SPARC64_SWITCH_TO_64_H

#include <asm/visasm.h>

/* Run before every context switch: flushw_all() spills all live register
 * windows, so nothing belonging to the outgoing task is left in the CPU's
 * window file when switch_to() runs.
 */
#define prepare_arch_switch(next) \
do { \
	flushw_all(); \
} while (0)

/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 *
 * NOTE(review): sequence overview, from the visible code only (the
 * TI_* offsets and ASI semantics come from headers not shown here):
 *
 *  1. save_and_clear_fpu() stashes the outgoing task's FPU state.
 *  2. %asi is loaded with ASI_AIUS, and the per-cpu
 *     trap_block[cpu].thread pointer is repointed at
 *     task_thread_info(next).
 *  3. The asm saves prev's %i6/%i7 to stack slots at %sp+2047+0x70/0x78
 *     (2047 is presumably the sparc64 stack bias -- confirm against the
 *     stack-frame headers), and saves %o6 -> TI_KSP, %wstate ->
 *     TI_WSTATE, %cwp -> TI_CWP in prev's thread_info.  %pil is raised
 *     to 15 around the %g6 (thread_info) and %cwp switch, then the same
 *     fields are reloaded from next's thread_info, %wstate/%i6/%i7/%g4
 *     are restored, and %pil is dropped back to 14.
 *  4. TI_NEW_CHILD is tested (brz on %o7): zero falls through to the
 *     global label switch_to_pc, i.e. an ordinary switch resumes here;
 *     nonzero jumps to ret_from_fork, so a freshly forked task starts
 *     there instead.
 *
 * Operand map: %0 = last ("=&r", with task_thread_info(next) fed in via
 * the tied "0" input, visible in the asm as %4), %1 = current,
 * %2 = current_thread_info_reg, %3 = __local_per_cpu_offset,
 * %5 = TI_WSTATE, %6 = TI_KSP, %7 = TI_NEW_CHILD, %8 = TI_CWP,
 * %9 = TI_TASK.
 */
#define switch_to(prev, next, last) \
do {	save_and_clear_fpu(); \
	__asm__ __volatile__("wr %%g0, %0, %%asi" \
	: : "r" (ASI_AIUS)); \
	trap_block[current_thread_info()->cpu].thread = \
		task_thread_info(next); \
	__asm__ __volatile__( \
	"mov	%%g4, %%g7\n\t" \
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
	"rdpr	%%wstate, %%o5\n\t" \
	"stx	%%o6, [%%g6 + %6]\n\t" \
	"stb	%%o5, [%%g6 + %5]\n\t" \
	"rdpr	%%cwp, %%o5\n\t" \
	"stb	%%o5, [%%g6 + %8]\n\t" \
	"wrpr	%%g0, 15, %%pil\n\t" \
	"mov	%4, %%g6\n\t" \
	"ldub	[%4 + %8], %%g1\n\t" \
	"wrpr	%%g1, %%cwp\n\t" \
	"ldx	[%%g6 + %6], %%o6\n\t" \
	"ldub	[%%g6 + %5], %%o5\n\t" \
	"ldub	[%%g6 + %7], %%o7\n\t" \
	"wrpr	%%o5, 0x0, %%wstate\n\t" \
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
	"ldx	[%%g6 + %9], %%g4\n\t" \
	"wrpr	%%g0, 14, %%pil\n\t" \
	"brz,pt %%o7, switch_to_pc\n\t" \
	" mov	%%g7, %0\n\t" \
	"sethi	%%hi(ret_from_fork), %%g1\n\t" \
	"jmpl	%%g1 + %%lo(ret_from_fork), %%g0\n\t" \
	" nop\n\t" \
	".globl switch_to_pc\n\t" \
	"switch_to_pc:\n\t" \
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
	  "=r" (__local_per_cpu_offset) \
	: "0" (task_thread_info(next)), \
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
	  "i" (TI_CWP), "i" (TI_TASK) \
	: "cc", \
	  "g1", "g2", "g3", "g7", \
	  "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
} while(0)

/* Prototypes only; both are implemented elsewhere in the sparc arch code. */
void synchronize_user_stack(void);
struct pt_regs;
void fault_in_user_windows(struct pt_regs *);

#endif /* __SPARC64_SWITCH_TO_64_H */