Matching lines (search: full:n), grouped by function:

In test_load_tmp() (lines 40-48):

    asm("v3 = vmem(%0 + #0)\n\t"
        "r1 = #1\n\t"
        "v12 = vsplat(r1)\n\t"
        "{\n\t"
        " v12.tmp = vmem(%1 + #0)\n\t"
        " v4.w = vadd(v12.w, v3.w)\n\t"
        "}\n\t"
        "v4.w = vadd(v4.w, v12.w)\n\t"
        "vmem(%2 + #0) = v4\n\t"

69 "r0 = #0x03030303\n\t" in test_load_tmp2()
70 "v16 = vsplat(r0)\n\t" in test_load_tmp2()
71 "r0 = #0x04040404\n\t" in test_load_tmp2()
72 "v18 = vsplat(r0)\n\t" in test_load_tmp2()
73 "r0 = #0x05050505\n\t" in test_load_tmp2()
74 "v21 = vsplat(r0)\n\t" in test_load_tmp2()
75 "{\n\t" in test_load_tmp2()
76 " v25:24 += vmpyo(v18.w, v14.h)\n\t" in test_load_tmp2()
77 " v15:14.tmp = vcombine(v21, v16)\n\t" in test_load_tmp2()
78 "}\n\t" in test_load_tmp2()
79 "vmem(%0 + #0) = v24\n\t" in test_load_tmp2()
80 "vmem(%1 + #0) = v25\n\t" in test_load_tmp2()
99 asm("{\n\t" in test_load_cur()
100 " v2.cur = vmem(%0 + #0)\n\t" in test_load_cur()
101 " vmem(%1 + #0) = v2\n\t" in test_load_cur()
102 "}\n\t" in test_load_cur()
123 asm("v2 = vmem(%0 + #0)\n\t" in test_load_aligned()
124 "vmem(%1 + #0) = v2\n\t" in test_load_aligned()
139 asm("v2 = vmemu(%0 + #0)\n\t" in test_load_unaligned()
140 "vmem(%1 + #0) = v2\n\t" in test_load_unaligned()
156 asm("v2 = vmem(%0 + #0)\n\t" in test_store_aligned()
157 "vmem(%1 + #0) = v2\n\t" in test_store_aligned()
172 asm("v2 = vmem(%0 + #0)\n\t" in test_store_unaligned()
173 "vmemu(%1 + #0) = v2\n\t" in test_store_unaligned()
193 asm("r4 = #0\n\t" in test_masked_store()
194 "v4 = vsplat(r4)\n\t" in test_masked_store()
195 "v5 = vmem(%0 + #0)\n\t" in test_masked_store()
196 "q0 = vcmp.eq(v4.w, v5.w)\n\t" in test_masked_store()
197 "v5 = vmem(%1)\n\t" in test_masked_store()
198 "if (!q0) vmem(%2) = v5\n\t" /* Inverted test */ in test_masked_store()
202 asm("r4 = #0\n\t" in test_masked_store()
203 "v4 = vsplat(r4)\n\t" in test_masked_store()
204 "v5 = vmem(%0 + #0)\n\t" in test_masked_store()
205 "q0 = vcmp.eq(v4.w, v5.w)\n\t" in test_masked_store()
206 "v5 = vmem(%1)\n\t" in test_masked_store()
207 "if (q0) vmem(%2) = v5\n\t" /* Non-inverted test */ in test_masked_store()
237 asm("{\n\t" in test_new_value_store()
238 " v2 = vmem(%0 + #0)\n\t" in test_new_value_store()
239 " vmem(%1 + #0) = v2.new\n\t" in test_new_value_store()
240 "}\n\t" in test_new_value_store()
248 asm("v7 = vmem(%0 + #0)\n\t" in test_new_value_store()
249 "v12 = vmem(%1 + #0)\n\t" in test_new_value_store()
250 "{\n\t" in test_new_value_store()
251 " v5:4 = vcombine(v12, v7)\n\t" in test_new_value_store()
252 " vmem(%2 + #0) = v5.new\n\t" in test_new_value_store()
253 "}\n\t" in test_new_value_store()
266 asm("v0 = vmem(%0 + #0)\n\t" in test_max_temps()
267 "v1 = vmem(%0 + #1)\n\t" in test_max_temps()
268 "v2 = vmem(%0 + #2)\n\t" in test_max_temps()
269 "v3 = vmem(%0 + #3)\n\t" in test_max_temps()
270 "v4 = vmem(%0 + #4)\n\t" in test_max_temps()
271 "{\n\t" in test_max_temps()
272 " v1:0.w = vadd(v3:2.w, v1:0.w)\n\t" in test_max_temps()
273 " v2.b = vshuffe(v3.b, v2.b)\n\t" in test_max_temps()
274 " v3.w = vadd(v1.w, v4.w)\n\t" in test_max_temps()
275 " v4.tmp = vmem(%0 + #5)\n\t" in test_max_temps()
276 "}\n\t" in test_max_temps()
277 "vmem(%1 + #0) = v0\n\t" in test_max_temps()
278 "vmem(%1 + #1) = v1\n\t" in test_max_temps()
279 "vmem(%1 + #2) = v2\n\t" in test_max_temps()
280 "vmem(%1 + #3) = v3\n\t" in test_max_temps()
281 "vmem(%1 + #4) = v4\n\t" in test_max_temps()
336 asm volatile ("v10 = vsplat(%0)\n\t" in test_vadduwsat()
337 "v11 = vsplat(%1)\n\t" in test_vadduwsat()
338 "v21.uw = vadd(v11.uw, v10.uw):sat\n\t" in test_vadduwsat()
339 "vmem(%2+#0) = v21\n\t" in test_vadduwsat()
370 asm volatile ("v16 = vsplat(%0)\n\t" in test_vsubuwsat_dv()
371 "v17 = vsplat(%1)\n\t" in test_vsubuwsat_dv()
372 "v26 = vsplat(%2)\n\t" in test_vsubuwsat_dv()
373 "v27 = vsplat(%3)\n\t" in test_vsubuwsat_dv()
374 "v25:24.uw = vsub(v17:16.uw, v27:26.uw):sat\n\t" in test_vsubuwsat_dv()
375 "vmem(%4+#0) = v24\n\t" in test_vsubuwsat_dv()
376 "vmem(%4+#1) = v25\n\t" in test_vsubuwsat_dv()
404 asm("v3 = vmem(%0 + #0)\n\t" in test_load_tmp_predicated()
405 "r1 = #1\n\t" in test_load_tmp_predicated()
406 "v12 = vsplat(r1)\n\t" in test_load_tmp_predicated()
407 "p1 = !cmp.eq(%3, #0)\n\t" in test_load_tmp_predicated()
408 "{\n\t" in test_load_tmp_predicated()
409 " if (p1) v12.tmp = vmem(%1 + #0)\n\t" in test_load_tmp_predicated()
410 " v4.w = vadd(v12.w, v3.w)\n\t" in test_load_tmp_predicated()
411 "}\n\t" in test_load_tmp_predicated()
412 "v4.w = vadd(v4.w, v12.w)\n\t" in test_load_tmp_predicated()
413 "vmem(%2 + #0) = v4\n\t" in test_load_tmp_predicated()
435 asm volatile("p0 = !cmp.eq(%3, #0)\n\t" in test_load_cur_predicated()
436 "v3 = vmem(%0+#0)\n\t" in test_load_cur_predicated()
441 "r0 = #0x01237654\n\t" in test_load_cur_predicated()
442 "v4 = vsplat(r0)\n\t" in test_load_cur_predicated()
443 "{\n\t" in test_load_cur_predicated()
444 " if (p0) v3.cur = vmem(%1+#0)\n\t" in test_load_cur_predicated()
445 " v4 = v3\n\t" in test_load_cur_predicated()
446 "}\n\t" in test_load_cur_predicated()
447 "vmem(%2+#0) = v4\n\t" in test_load_cur_predicated()
461 asm volatile("v2 = vsplat(%0)\n\t" in test_vcombine()
462 "v3 = vsplat(%1)\n\t" in test_vcombine()
463 "v3:2 = vcombine(v2, v3)\n\t" in test_vcombine()
464 "vmem(%2+#0) = v2\n\t" in test_vcombine()
465 "vmem(%2+#1) = v3\n\t" in test_vcombine()
480 "r0 = #0x12345678\n" in test_store_new()
481 "v0 = vsplat(r0)\n" in test_store_new()
482 "r0 = #0xff00ff00\n" in test_store_new()
483 "v1 = vsplat(r0)\n" in test_store_new()
484 "{\n" in test_store_new()
485 " vdeal(v1,v0,r0)\n" in test_store_new()
486 " vmem(%0) = v0.new\n" in test_store_new()
487 "}\n" in test_store_new()