Lines Matching +full:1 +full:v8

All hits are in arch/s390/kernel/fpu.c. In __kernel_fpu_begin():
  32:   asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
  57:   "   la   1,%[vxrs]\n"      /* load save area */
  63:   * case a vstm V8..V23 is the best instruction
  66:   "   jne  0f\n"             /* -> save V8..V23 */
  67:   "   VSTM 8,23,128,1\n"     /* vstm %v8,%v23,128(%r1) */
  73:   "   brc  2,1f\n"           /* 10 -> save V8..V15 */
  74:   "   VSTM 0,7,0,1\n"        /* vstm %v0,%v7,0(%r1) */
  76:   "1: VSTM 8,15,128,1\n"     /* vstm %v8,%v15,128(%r1) */
  78:   "2: VSTM 0,15,0,1\n"       /* vstm %v0,%v15,0(%r1) */
  84:   "   VSTM 16,23,256,1\n"    /* vstm %v16,%v23,256(%r1) */
  86:   "4: VSTM 24,31,384,1\n"    /* vstm %v24,%v31,384(%r1) */
  88:   "5: VSTM 0,15,0,1\n"       /* vstm %v0,%v15,0(%r1) */
  89:   "6: VSTM 16,31,256,1\n"    /* vstm %v16,%v31,256(%r1) */
  93:   : "1", "cc");
  114:  asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
  139:  "   la   1,%[vxrs]\n"      /* load restore area */
  145:  * case a vlm V8..V23 is the best instruction
  148:  "   jne  0f\n"             /* -> restore V8..V23 */
  149:  "   VLM  8,23,128,1\n"     /* vlm %v8,%v23,128(%r1) */
  155:  "   brc  2,1f\n"           /* 10 -> restore V8..V15 */
  156:  "   VLM  0,7,0,1\n"        /* vlm %v0,%v7,0(%r1) */
  158:  "1: VLM  8,15,128,1\n"     /* vlm %v8,%v15,128(%r1) */
  160:  "2: VLM  0,15,0,1\n"       /* vlm %v0,%v15,0(%r1) */
  166:  "   VLM  16,23,256,1\n"    /* vlm %v16,%v23,256(%r1) */
  168:  "4: VLM  24,31,384,1\n"    /* vlm %v24,%v31,384(%r1) */
  170:  "5: VLM  0,15,0,1\n"       /* vlm %v0,%v15,0(%r1) */
  171:  "6: VLM  16,31,256,1\n"    /* vlm %v16,%v31,256(%r1) */
  175:  : "1", "cc");