Lines Matching +full:3 +full:- +full:n in arch/parisc/kernel/unaligned.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
35 "\tldil L%%" #lbl ", %%r1\n" \
36 "\tldo R%%" #lbl "(%%r1), %%r1\n" \
37 "\tbv,n %%r0(%%r1)\n"
51 /* skip LDB - never unaligned (index) */
59 /* skip LDB - never unaligned (short) */
67 /* skip STB - never unaligned */
71 /* skip STBY - never unaligned */
72 /* skip STDBY - never unaligned */
111 #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) argument
116 #define ERR_NOTHANDLED -1
117 #define ERR_PAGEFAULT -2
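
For orientation: the IM(i,n) macro above rebuilds PA-RISC's low-sign-extended immediates, in which the sign bit is stored in bit 0 of the instruction field. A standalone C sketch of the same decoding (the function name here is illustrative, not from the file):

    #include <stdio.h>

    /* Same arithmetic as IM(i,n): bit 0 of the raw field is the sign,
     * the remaining n-1 bits are the low bits of the magnitude. */
    static long decode_low_sign_ext(unsigned long i, int n)
    {
            long val = (i >> 1) & ((1UL << (n - 1)) - 1);

            if (i & 1)                              /* sign bit set: negative */
                    val |= (long)(~0UL << (n - 1));
            return val;
    }

    int main(void)
    {
            printf("%ld\n", decode_low_sign_ext(0x1, 14));  /* -> -8192 */
            printf("%ld\n", decode_low_sign_ext(0x8, 14));  /* ->  4    */
            return 0;
    }
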
123 unsigned long saddr = regs->ior; in emulate_ldh()
127 DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", in emulate_ldh()
128 regs->isr, regs->ior, toreg); in emulate_ldh()
131 " mtsp %4, %%sr1\n" in emulate_ldh()
132 "1: ldbs 0(%%sr1,%3), %%r20\n" in emulate_ldh()
133 "2: ldbs 1(%%sr1,%3), %0\n" in emulate_ldh()
134 " depw %%r20, 23, 24, %0\n" in emulate_ldh()
135 " copy %%r0, %1\n" in emulate_ldh()
136 "3: \n" in emulate_ldh()
137 " .section .fixup,\"ax\"\n" in emulate_ldh()
138 "4: ldi -2, %1\n" in emulate_ldh()
139 FIXUP_BRANCH(3b) in emulate_ldh()
140 " .previous\n" in emulate_ldh()
144 : "0" (val), "r" (saddr), "r" (regs->isr) in emulate_ldh()
147 DPRINTF("val = 0x" RFMT "\n", val); in emulate_ldh()
150 regs->gr[toreg] = val; in emulate_ldh()
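
A rough C picture of what the ldh emulation above computes: PA-RISC is big-endian, so the two byte loads plus the depw deposit simply rebuild the halfword byte by byte. The real routine also switches %sr1 to the faulting space and routes both loads through the exception table, which plain C cannot express:

    #include <stdint.h>

    /* Assemble a big-endian halfword from two byte loads, as the
     * "ldbs 0(...)" / "ldbs 1(...)" / "depw r20,23,24,val" sequence does. */
    static uint32_t ldh_by_bytes(const uint8_t *p)
    {
            uint32_t b0 = p[0];             /* most significant byte  */
            uint32_t b1 = p[1];             /* least significant byte */

            return (b0 << 8) | b1;          /* what the depw builds in val */
    }
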
157 unsigned long saddr = regs->ior; in emulate_ldw()
161 DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", in emulate_ldw()
162 regs->isr, regs->ior, toreg); in emulate_ldw()
165 " zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */ in emulate_ldw()
166 " mtsp %4, %%sr1\n" in emulate_ldw()
167 " depw %%r0,31,2,%3\n" in emulate_ldw()
168 "1: ldw 0(%%sr1,%3),%0\n" in emulate_ldw()
169 "2: ldw 4(%%sr1,%3),%%r20\n" in emulate_ldw()
170 " subi 32,%%r19,%%r19\n" in emulate_ldw()
171 " mtctl %%r19,11\n" in emulate_ldw()
172 " vshd %0,%%r20,%0\n" in emulate_ldw()
173 " copy %%r0, %1\n" in emulate_ldw()
174 "3: \n" in emulate_ldw()
175 " .section .fixup,\"ax\"\n" in emulate_ldw()
176 "4: ldi -2, %1\n" in emulate_ldw()
177 FIXUP_BRANCH(3b) in emulate_ldw()
178 " .previous\n" in emulate_ldw()
182 : "0" (val), "r" (saddr), "r" (regs->isr) in emulate_ldw()
185 DPRINTF("val = 0x" RFMT "\n", val); in emulate_ldw()
188 ((__u32*)(regs->fr))[toreg] = val; in emulate_ldw()
190 regs->gr[toreg] = val; in emulate_ldw()
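
The ldw emulation above is the classic two-load trick: read the two aligned words that straddle the target and funnel-shift them together (vshd, with SAR loaded through mtctl ...,11 to 32 minus eight times the byte offset). A plain C sketch, assuming the address really is misaligned, as it is whenever this handler runs:

    #include <stdint.h>

    static uint32_t ldw_by_words(uintptr_t addr)
    {
            unsigned int shift = (addr & 3) * 8;                /* 8, 16 or 24 */
            const uint32_t *base = (const uint32_t *)(addr & ~(uintptr_t)3);
            uint32_t w0 = base[0], w1 = base[1];                /* aligned loads */

            /* big-endian join, the moral equivalent of "vshd w0,w1" */
            return (w0 << shift) | (w1 >> (32 - shift));
    }
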
196 unsigned long saddr = regs->ior; in emulate_ldd()
200 DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", in emulate_ldd()
201 regs->isr, regs->ior, toreg); in emulate_ldd()
206 return -1; in emulate_ldd()
209 " depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ in emulate_ldd()
210 " mtsp %4, %%sr1\n" in emulate_ldd()
211 " depd %%r0,63,3,%3\n" in emulate_ldd()
212 "1: ldd 0(%%sr1,%3),%0\n" in emulate_ldd()
213 "2: ldd 8(%%sr1,%3),%%r20\n" in emulate_ldd()
214 " subi 64,%%r19,%%r19\n" in emulate_ldd()
215 " mtsar %%r19\n" in emulate_ldd()
216 " shrpd %0,%%r20,%%sar,%0\n" in emulate_ldd()
217 " copy %%r0, %1\n" in emulate_ldd()
218 "3: \n" in emulate_ldd()
219 " .section .fixup,\"ax\"\n" in emulate_ldd()
220 "4: ldi -2, %1\n" in emulate_ldd()
221 FIXUP_BRANCH(3b) in emulate_ldd()
222 " .previous\n" in emulate_ldd()
226 : "0" (val), "r" (saddr), "r" (regs->isr) in emulate_ldd()
232 " zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */ in emulate_ldd()
233 " mtsp %6, %%sr1\n" in emulate_ldd()
234 " dep %%r0,31,2,%5\n" in emulate_ldd()
235 "1: ldw 0(%%sr1,%5),%0\n" in emulate_ldd()
236 "2: ldw 4(%%sr1,%5),%1\n" in emulate_ldd()
237 "3: ldw 8(%%sr1,%5),%%r20\n" in emulate_ldd()
238 " subi 32,%%r19,%%r19\n" in emulate_ldd()
239 " mtsar %%r19\n" in emulate_ldd()
240 " vshd %0,%1,%0\n" in emulate_ldd()
241 " vshd %1,%%r20,%1\n" in emulate_ldd()
242 " copy %%r0, %2\n" in emulate_ldd()
243 "4: \n" in emulate_ldd()
244 " .section .fixup,\"ax\"\n" in emulate_ldd()
245 "5: ldi -2, %2\n" in emulate_ldd()
247 " .previous\n" in emulate_ldd()
250 ASM_EXCEPTIONTABLE_ENTRY(3b,5b) in emulate_ldd()
252 : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr) in emulate_ldd()
258 DPRINTF("val = 0x%llx\n", val); in emulate_ldd()
261 regs->fr[toreg] = val; in emulate_ldd()
263 regs->gr[toreg] = val; in emulate_ldd()
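
On the non-PA2.0 side, the ldd emulation shown above reads the three aligned words that can overlap a misaligned doubleword and joins them with two funnel shifts into valh/vall. A C sketch of that intent (a 64-bit funnel is used here so the word-aligned-but-doubleword-misaligned case falls out naturally):

    #include <stdint.h>

    static uint64_t ldd_by_words(uintptr_t addr)
    {
            unsigned int sar = 32 - (addr & 3) * 8;             /* funnel shift */
            const uint32_t *base = (const uint32_t *)(addr & ~(uintptr_t)3);
            uint64_t f0 = ((uint64_t)base[0] << 32) | base[1];
            uint64_t f1 = ((uint64_t)base[1] << 32) | base[2];
            uint32_t hi = (uint32_t)(f0 >> sar);
            uint32_t lo = (uint32_t)(f1 >> sar);

            return ((uint64_t)hi << 32) | lo;                   /* big-endian */
    }
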
270 unsigned long val = regs->gr[frreg]; in emulate_sth()
276 DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, in emulate_sth()
277 val, regs->isr, regs->ior); in emulate_sth()
280 " mtsp %3, %%sr1\n" in emulate_sth()
281 " extrw,u %1, 23, 8, %%r19\n" in emulate_sth()
282 "1: stb %1, 1(%%sr1, %2)\n" in emulate_sth()
283 "2: stb %%r19, 0(%%sr1, %2)\n" in emulate_sth()
284 " copy %%r0, %0\n" in emulate_sth()
285 "3: \n" in emulate_sth()
286 " .section .fixup,\"ax\"\n" in emulate_sth()
287 "4: ldi -2, %0\n" in emulate_sth()
288 FIXUP_BRANCH(3b) in emulate_sth()
289 " .previous\n" in emulate_sth()
293 : "r" (val), "r" (regs->ior), "r" (regs->isr) in emulate_sth()
305 val = ((__u32*)(regs->fr))[frreg]; in emulate_stw()
307 val = regs->gr[frreg]; in emulate_stw()
311 DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, in emulate_stw()
312 val, regs->isr, regs->ior); in emulate_stw()
316 " mtsp %3, %%sr1\n" in emulate_stw()
317 " zdep %2, 28, 2, %%r19\n" in emulate_stw()
318 " dep %%r0, 31, 2, %2\n" in emulate_stw()
319 " mtsar %%r19\n" in emulate_stw()
320 " depwi,z -2, %%sar, 32, %%r19\n" in emulate_stw()
321 "1: ldw 0(%%sr1,%2),%%r20\n" in emulate_stw()
322 "2: ldw 4(%%sr1,%2),%%r21\n" in emulate_stw()
323 " vshd %%r0, %1, %%r22\n" in emulate_stw()
324 " vshd %1, %%r0, %%r1\n" in emulate_stw()
325 " and %%r20, %%r19, %%r20\n" in emulate_stw()
326 " andcm %%r21, %%r19, %%r21\n" in emulate_stw()
327 " or %%r22, %%r20, %%r20\n" in emulate_stw()
328 " or %%r1, %%r21, %%r21\n" in emulate_stw()
329 " stw %%r20,0(%%sr1,%2)\n" in emulate_stw()
330 " stw %%r21,4(%%sr1,%2)\n" in emulate_stw()
331 " copy %%r0, %0\n" in emulate_stw()
332 "3: \n" in emulate_stw()
333 " .section .fixup,\"ax\"\n" in emulate_stw()
334 "4: ldi -2, %0\n" in emulate_stw()
335 FIXUP_BRANCH(3b) in emulate_stw()
336 " .previous\n" in emulate_stw()
340 : "r" (val), "r" (regs->ior), "r" (regs->isr) in emulate_stw()
351 val = regs->fr[frreg]; in emulate_std()
353 val = regs->gr[frreg]; in emulate_std()
357 DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg, in emulate_std()
358 val, regs->isr, regs->ior); in emulate_std()
363 return -1; in emulate_std()
366 " mtsp %3, %%sr1\n" in emulate_std()
367 " depd,z %2, 60, 3, %%r19\n" in emulate_std()
368 " depd %%r0, 63, 3, %2\n" in emulate_std()
369 " mtsar %%r19\n" in emulate_std()
370 " depdi,z -2, %%sar, 64, %%r19\n" in emulate_std()
371 "1: ldd 0(%%sr1,%2),%%r20\n" in emulate_std()
372 "2: ldd 8(%%sr1,%2),%%r21\n" in emulate_std()
373 " shrpd %%r0, %1, %%sar, %%r22\n" in emulate_std()
374 " shrpd %1, %%r0, %%sar, %%r1\n" in emulate_std()
375 " and %%r20, %%r19, %%r20\n" in emulate_std()
376 " andcm %%r21, %%r19, %%r21\n" in emulate_std()
377 " or %%r22, %%r20, %%r20\n" in emulate_std()
378 " or %%r1, %%r21, %%r21\n" in emulate_std()
379 "3: std %%r20,0(%%sr1,%2)\n" in emulate_std()
380 "4: std %%r21,8(%%sr1,%2)\n" in emulate_std()
381 " copy %%r0, %0\n" in emulate_std()
382 "5: \n" in emulate_std()
383 " .section .fixup,\"ax\"\n" in emulate_std()
384 "6: ldi -2, %0\n" in emulate_std()
386 " .previous\n" in emulate_std()
389 ASM_EXCEPTIONTABLE_ENTRY(3b,6b) in emulate_std()
392 : "r" (val), "r" (regs->ior), "r" (regs->isr) in emulate_std()
398 " mtsp %4, %%sr1\n" in emulate_std()
399 " zdep %2, 29, 2, %%r19\n" in emulate_std()
400 " dep %%r0, 31, 2, %2\n" in emulate_std()
401 " mtsar %%r19\n" in emulate_std()
402 " zvdepi -2, 32, %%r19\n" in emulate_std()
403 "1: ldw 0(%%sr1,%3),%%r20\n" in emulate_std()
404 "2: ldw 8(%%sr1,%3),%%r21\n" in emulate_std()
405 " vshd %1, %2, %%r1\n" in emulate_std()
406 " vshd %%r0, %1, %1\n" in emulate_std()
407 " vshd %2, %%r0, %2\n" in emulate_std()
408 " and %%r20, %%r19, %%r20\n" in emulate_std()
409 " andcm %%r21, %%r19, %%r21\n" in emulate_std()
410 " or %1, %%r20, %1\n" in emulate_std()
411 " or %2, %%r21, %2\n" in emulate_std()
412 "3: stw %1,0(%%sr1,%1)\n" in emulate_std()
413 "4: stw %%r1,4(%%sr1,%3)\n" in emulate_std()
414 "5: stw %2,8(%%sr1,%3)\n" in emulate_std()
415 " copy %%r0, %0\n" in emulate_std()
416 "6: \n" in emulate_std()
417 " .section .fixup,\"ax\"\n" in emulate_std()
418 "7: ldi -2, %0\n" in emulate_std()
420 " .previous\n" in emulate_std()
423 ASM_EXCEPTIONTABLE_ENTRY(3b,7b) in emulate_std()
427 : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) in emulate_std()
438 unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; in handle_unaligned()
447 if (current->thread.flags & PARISC_UAC_SIGBUS) { in handle_unaligned()
451 if (!(current->thread.flags & PARISC_UAC_NOPRINT) && in handle_unaligned()
454 sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", in handle_unaligned()
455 current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); in handle_unaligned()
466 /* handle modification - OK, it's ugly, see the instruction manual */ in handle_unaligned()
467 switch (MAJOR_OP(regs->iir)) in handle_unaligned()
472 if (regs->iir&0x20) in handle_unaligned()
475 if (regs->iir&0x1000) /* short loads */ in handle_unaligned()
476 if (regs->iir&0x200) in handle_unaligned()
477 newbase += IM5_3(regs->iir); in handle_unaligned()
479 newbase += IM5_2(regs->iir); in handle_unaligned()
480 else if (regs->iir&0x2000) /* scaled indexed */ in handle_unaligned()
483 switch (regs->iir & OPCODE1_MASK) in handle_unaligned()
491 shift= 3; break; in handle_unaligned()
493 newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift; in handle_unaligned()
495 newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0); in handle_unaligned()
501 newbase += IM14(regs->iir); in handle_unaligned()
505 if (regs->iir&8) in handle_unaligned()
508 newbase += IM14(regs->iir&~0xe); in handle_unaligned()
514 newbase += IM14(regs->iir&6); in handle_unaligned()
518 if (regs->iir&4) in handle_unaligned()
521 newbase += IM14(regs->iir&~4); in handle_unaligned()
527 switch (regs->iir & OPCODE1_MASK) in handle_unaligned()
531 ret = emulate_ldh(regs, R3(regs->iir)); in handle_unaligned()
538 ret = emulate_ldw(regs, R3(regs->iir),0); in handle_unaligned()
542 ret = emulate_sth(regs, R2(regs->iir)); in handle_unaligned()
547 ret = emulate_stw(regs, R2(regs->iir),0); in handle_unaligned()
555 ret = emulate_ldd(regs, R3(regs->iir),0); in handle_unaligned()
560 ret = emulate_std(regs, R2(regs->iir),0); in handle_unaligned()
569 ret = emulate_ldw(regs,FR3(regs->iir),1); in handle_unaligned()
575 ret = emulate_ldd(regs,R3(regs->iir),1); in handle_unaligned()
583 ret = emulate_stw(regs,FR3(regs->iir),1); in handle_unaligned()
589 ret = emulate_std(regs,R3(regs->iir),1); in handle_unaligned()
600 switch (regs->iir & OPCODE2_MASK) in handle_unaligned()
604 ret = emulate_ldd(regs,R2(regs->iir),1); in handle_unaligned()
608 ret = emulate_std(regs, R2(regs->iir),1); in handle_unaligned()
611 ret = emulate_ldd(regs, R2(regs->iir),0); in handle_unaligned()
614 ret = emulate_std(regs, R2(regs->iir),0); in handle_unaligned()
618 switch (regs->iir & OPCODE3_MASK) in handle_unaligned()
622 ret = emulate_ldw(regs, R2(regs->iir),0); in handle_unaligned()
625 ret = emulate_ldw(regs, R2(regs->iir),1); in handle_unaligned()
630 ret = emulate_stw(regs, R2(regs->iir),1); in handle_unaligned()
633 ret = emulate_stw(regs, R2(regs->iir),0); in handle_unaligned()
636 switch (regs->iir & OPCODE4_MASK) in handle_unaligned()
639 ret = emulate_ldh(regs, R2(regs->iir)); in handle_unaligned()
643 ret = emulate_ldw(regs, R2(regs->iir),0); in handle_unaligned()
646 ret = emulate_sth(regs, R2(regs->iir)); in handle_unaligned()
650 ret = emulate_stw(regs, R2(regs->iir),0); in handle_unaligned()
654 if (ret == 0 && modify && R1(regs->iir)) in handle_unaligned()
655 regs->gr[R1(regs->iir)] = newbase; in handle_unaligned()
659 printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir); in handle_unaligned()
661 DPRINTF("ret = %d\n", ret); in handle_unaligned()
673 printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); in handle_unaligned()
679 (void __user *)regs->ior); in handle_unaligned()
686 (void __user *)regs->ior); in handle_unaligned()
693 regs->gr[0]|=PSW_N; in handle_unaligned()
709 switch (regs->iir & OPCODE1_MASK) { in check_unaligned()
723 align_mask = 3UL; in check_unaligned()
727 switch (regs->iir & OPCODE4_MASK) { in check_unaligned()
736 align_mask = 3UL; in check_unaligned()
742 return (int)(regs->ior & align_mask); in check_unaligned()
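
Finally, check_unaligned() above reduces to picking the alignment mask the decoded opcode requires (the excerpt shows align_mask = 3UL for 4-byte accesses) and testing the low bits of the faulting address in ior. An illustrative helper with hypothetical names, not taken from the file:

    #include <stdbool.h>
    #include <stdint.h>

    static bool access_is_unaligned(uintptr_t ior, unsigned int size_bytes)
    {
            uintptr_t align_mask = (uintptr_t)size_bytes - 1;   /* e.g. 3 for a word */

            return (ior & align_mask) != 0;
    }
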