Lines Matching +full:reg +full:- +full:names
15 * Mnemonic names for arguments to memcpy/__copy_user
23 * dma-coherent systems.
36 #include <asm/asm-offsets.h>
49 * - src and dst don't overlap
50 * - src is readable
51 * - dst is writable
59 * - src is readable (no exceptions when reading src)
61 * - dst is writable (no exceptions when writing dst)
62 * __copy_user uses a non-standard calling convention; see
63 * include/asm-mips/uaccess.h
76 * 1- AT contains the address of the byte just past the end of the source
78 * 2- src_entry <= src < AT, and
79 * 3- (dst - src) == (dst_entry - src_entry),
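Invariants (1)-(3) are what let a load-fault handler recover cleanly: the end-of-source address kept in AT together with the faulting source address gives the number of bytes that were never copied, and invariant (3) locates the corresponding position in dst. A rough C sketch of that computation, with illustrative names that are not the kernel's:

    /* Hedged sketch of how a load-fault handler can use the invariants.
     * src_end  - the value kept in AT (one past the last source byte)
     * bad_addr - the first source address that could not be read;
     *            invariant (2) keeps it no greater than src_end.
     * The result is the residual count of bytes left uncopied. */
    static unsigned long bytes_not_copied(unsigned long src_end,
                                          unsigned long bad_addr)
    {
            return src_end - bad_addr;
    }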
110 * reg : Register
115 #define EXC(insn, type, reg, addr, handler) \
117 9: insn reg, addr; \
126 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
135 insn reg, addr; \
140 * Only on the 64-bit kernel can we make use of 64-bit registers.
149 #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
150 #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
151 #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
152 #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
153 #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
154 #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
186 #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
187 #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
188 #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
189 #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
190 #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
191 #define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
204 #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
205 #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
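The two groups of definitions above bind the same macro names to doubleword instructions (ld/sd, kernel lines 149-154) on 64-bit kernels and to word instructions (lw/sw, lines 186-191) on 32-bit kernels, so the copy loops further down are written once and assemble to the widest natural access either way. A hedged C analogue of that build-time width selection, with illustrative identifiers:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative analogue of the LOAD/STORE/NBYTES abstraction:
     * choose the copy unit at build time, write the loop once. */
    #ifdef CONFIG_64BIT
    typedef uint64_t copy_unit_t;        /* the ld/sd bindings, NBYTES == 8 */
    #else
    typedef uint32_t copy_unit_t;        /* the lw/sw bindings, NBYTES == 4 */
    #endif

    #define NBYTES sizeof(copy_unit_t)

    static void copy_aligned_units(copy_unit_t *dst, const copy_unit_t *src,
                                   size_t units)
    {
            while (units--)
                    *dst++ = *src++;
    }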
250 #define REST(unit) (FIRST(unit)+NBYTES-1)
253 #define ADDRMASK (NBYTES-1)
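FIRST/REST describe the byte span of one copy unit, and ADDRMASK works because NBYTES is a power of two: masking with NBYTES-1 is the same as taking a remainder, which is also how the later "and rem, len, (8*NBYTES-1)" lines compute len % (8*NBYTES) without a division. A small illustration, reusing the copy_unit_t/NBYTES sketch above:

    #define ADDRMASK (NBYTES - 1)

    /* For a power of two N, (x & (N - 1)) == x % N. */
    static unsigned long misalignment(uintptr_t addr)
    {
            return addr & ADDRMASK;              /* addr % NBYTES */
    }

    static unsigned long unrolled_loop_rem(unsigned long len)
    {
            return len & (8 * NBYTES - 1);       /* len % (8*NBYTES) */
    }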
313 * use delay slot for fall-through
319 and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
338 STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
339 STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
340 STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
341 STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
342 STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
343 STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
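The stores at UNIT(-6) through UNIT(-1) above are the tail of the main unrolled loop: the remainder the loop cannot handle is split off first (line 319), eight units are loaded, the pointers are advanced by 8*NBYTES, and the values are written back at negative offsets. A hedged C sketch of that shape, reusing copy_unit_t/NBYTES from above (the t0-t7 names only echo the listing; register allocation differs in the real code):

    static void copy_unrolled(copy_unit_t *dst, const copy_unit_t *src,
                              unsigned long len)
    {
            unsigned long rem = len & (8 * NBYTES - 1);   /* len % (8*NBYTES) */

            while (len != rem) {
                    copy_unit_t t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
                    copy_unit_t t4 = src[4], t5 = src[5], t6 = src[6], t7 = src[7];

                    src += 8;
                    dst += 8;
                    len -= 8 * NBYTES;

                    /* mirrors STORE(..., UNIT(-8)..UNIT(-1)(dst), ...) */
                    dst[-8] = t0; dst[-7] = t1; dst[-6] = t2; dst[-5] = t3;
                    dst[-4] = t4; dst[-3] = t5; dst[-2] = t6; dst[-1] = t7;
            }
            /* the remaining rem bytes are handled by the cleanup code below */
    }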
356 and rem, len, (NBYTES-1) # rem = len % NBYTES
397 * because we can't assume read-access to dst. Instead, use
401 * wide-issue MIPS processors because the code has fewer branches and
402 * more instruction-level parallelism.
412 STREST(t0, -1(t1), .Ls_exc\@)
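The comment fragments at lines 397-402 explain this tail fix-up: the last rem (< NBYTES) bytes cannot be finished with a load/mask/or/store on dst, because dst may not be readable, and a byte loop would add branches. Instead the code does one full-width load from src, discards the bits it must not store, and writes the result with a single partial store (STREST, i.e. swr/sdr) that ends at the last destination byte. The hedged sketch below shows only the semantic effect, not the partial-store mechanics:

    /* Effect of the load + shift + STREST sequence: copy the final
     * rem (< NBYTES) bytes.  dst is only ever written, never read. */
    static void copy_tail(unsigned char *dst, const unsigned char *src,
                          unsigned long rem)
    {
            while (rem--)
                    *dst++ = *src++;
    }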
443 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
479 and rem, len, NBYTES-1 # rem = len % NBYTES
515 LOADB(t0, NBYTES-2(src), .Ll_exc\@)
518 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
561 sb t1, 0(dst) # can't fault -- we're copy_from_user
607 sltu t0, a1, t0 # dst + len <= src -> memcpy
608 sltu t1, a0, t1 # dst >= src + len -> memcpy
625 lb t0, -1(a1)
627 sb t0, -1(a0)
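Lines 607-627 are the memmove entry and the backward byte loop of __rmemcpy: the two sltu tests evaluate the non-overlap conditions spelled out in the comments, and only when both fail (the regions really overlap) does the copy take the byte-at-a-time path, which runs from the end when dst lies above src. A hedged C rendering of that dispatch, with an illustrative function name:

    #include <string.h>

    void *memmove_sketch(void *dstv, const void *srcv, unsigned long len)
    {
            unsigned char *dst = dstv;
            const unsigned char *src = srcv;

            /* Overlap only if src < dst + len AND dst < src + len;
             * otherwise a plain memcpy is safe (the two sltu tests). */
            if (!(src < dst + len && dst < src + len))
                    return memcpy(dstv, srcv, len);

            if (src >= dst) {                    /* forward copy cannot clobber src */
                    while (len--)
                            *dst++ = *src++;
            } else {                             /* copy backwards from the end */
                    dst += len;
                    src += len;
                    while (len--)
                            *--dst = *--src;
            }
            return dstv;
    }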
666 /* Legacy Mode, user <-> user */
676 * virtual <-> physical translation when a virtual address is actually in user