Lines Matching +full:len +full:- +full:or +full:- +full:define

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
10 #define INSN_OPCODE_MASK 0x007c
11 #define INSN_OPCODE_SHIFT 2
12 #define INSN_OPCODE_SYSTEM 28
14 #define INSN_MASK_WFI 0xffffffff
15 #define INSN_MATCH_WFI 0x10500073
17 #define INSN_MATCH_CSRRW 0x1073
18 #define INSN_MASK_CSRRW 0x707f
19 #define INSN_MATCH_CSRRS 0x2073
20 #define INSN_MASK_CSRRS 0x707f
21 #define INSN_MATCH_CSRRC 0x3073
22 #define INSN_MASK_CSRRC 0x707f
23 #define INSN_MATCH_CSRRWI 0x5073
24 #define INSN_MASK_CSRRWI 0x707f
25 #define INSN_MATCH_CSRRSI 0x6073
26 #define INSN_MASK_CSRRSI 0x707f
27 #define INSN_MATCH_CSRRCI 0x7073
28 #define INSN_MASK_CSRRCI 0x707f
30 #define INSN_MATCH_LB 0x3
31 #define INSN_MASK_LB 0x707f
32 #define INSN_MATCH_LH 0x1003
33 #define INSN_MASK_LH 0x707f
34 #define INSN_MATCH_LW 0x2003
35 #define INSN_MASK_LW 0x707f
36 #define INSN_MATCH_LD 0x3003
37 #define INSN_MASK_LD 0x707f
38 #define INSN_MATCH_LBU 0x4003
39 #define INSN_MASK_LBU 0x707f
40 #define INSN_MATCH_LHU 0x5003
41 #define INSN_MASK_LHU 0x707f
42 #define INSN_MATCH_LWU 0x6003
43 #define INSN_MASK_LWU 0x707f
44 #define INSN_MATCH_SB 0x23
45 #define INSN_MASK_SB 0x707f
46 #define INSN_MATCH_SH 0x1023
47 #define INSN_MASK_SH 0x707f
48 #define INSN_MATCH_SW 0x2023
49 #define INSN_MASK_SW 0x707f
50 #define INSN_MATCH_SD 0x3023
51 #define INSN_MASK_SD 0x707f
53 #define INSN_MATCH_C_LD 0x6000
54 #define INSN_MASK_C_LD 0xe003
55 #define INSN_MATCH_C_SD 0xe000
56 #define INSN_MASK_C_SD 0xe003
57 #define INSN_MATCH_C_LW 0x4000
58 #define INSN_MASK_C_LW 0xe003
59 #define INSN_MATCH_C_SW 0xc000
60 #define INSN_MASK_C_SW 0xe003
61 #define INSN_MATCH_C_LDSP 0x6002
62 #define INSN_MASK_C_LDSP 0xe003
63 #define INSN_MATCH_C_SDSP 0xe002
64 #define INSN_MASK_C_SDSP 0xe003
65 #define INSN_MATCH_C_LWSP 0x4002
66 #define INSN_MASK_C_LWSP 0xe003
67 #define INSN_MATCH_C_SWSP 0xc002
68 #define INSN_MASK_C_SWSP 0xe003
70 #define INSN_16BIT_MASK 0x3
72 #define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
74 #define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
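The INSN_MATCH_*/INSN_MASK_* pairs above classify a trapped instruction by AND-ing the raw encoding with the mask and comparing against the match value, and INSN_IS_16BIT()/INSN_LEN() then decide whether sepc must advance by 2 or 4 bytes once emulation is done. A minimal sketch of that pattern, assuming it sits next to the macros above (the helper name is made up for illustration):

static bool insn_is_word_load(unsigned long insn)
{
	/* 32-bit LW: the masked bits must equal the match value. */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW)
		return true;
	/* Compressed C.LW has its own mask/match pair. */
	if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW)
		return true;
	return false;
}

/* After emulating, the handler skips the instruction:
 *	ct->sepc += INSN_LEN(insn);	// 2 for compressed, 4 otherwise
 */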
77 #define LOG_REGBYTES 3
79 #define LOG_REGBYTES 2
81 #define REGBYTES (1 << LOG_REGBYTES)
83 #define SH_RD 7
84 #define SH_RS1 15
85 #define SH_RS2 20
86 #define SH_RS2C 2
87 #define MASK_RX 0x1f
89 #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
90 #define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
93 #define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
95 #define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
98 #define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
101 #define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
103 #define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
105 #define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
106 #define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
107 #define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
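RV_X(x, s, n) extracts an n-bit field starting at bit s, and the RVC_RS1S()/RVC_RS2S() helpers map the 3-bit register fields of compressed instructions onto x8..x15, the only registers those encodings can name. A hedged illustration of what the expansion amounts to (insn stands for a C.SW/C.SD-style encoding already in scope):

/* The rs2' field of a compressed store sits at bit SH_RS2C (2). */
unsigned long rs2_field = RV_X(insn, SH_RS2C, 3);	/* raw 3-bit field */
unsigned long rs2_gpr   = 8 + rs2_field;		/* x8 .. x15 */
/* RVC_RS2S(insn) expands to exactly this 8 + RV_X(insn, SH_RS2C, 3). */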
109 #define SHIFT_RIGHT(x, y) \
110 ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
112 #define REG_MASK \
113 ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
115 #define REG_OFFSET(insn, pos) \
116 (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
118 #define REG_PTR(insn, pos, regs) \
121 #define GET_FUNCT3(insn) (((insn) >> 12) & 7)
123 #define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
124 #define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
125 #define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
126 #define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
127 #define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
128 #define GET_SP(regs) (*REG_PTR(2, 0, regs))
129 #define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
130 #define IMM_I(insn) ((s32)(insn) >> 20)
131 #define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
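REG_OFFSET()/REG_PTR() treat the saved guest context as an array of XLEN-sized registers and turn the 5-bit rd/rs fields directly into a byte offset into that structure; GET_RS1(), GET_RS2() and SET_RD() build on that, and IMM_I() recovers the sign-extended I-type immediate with an arithmetic shift. A rough equivalent in plainer C, assuming the GPRs are laid out in register-number order at the start of struct kvm_cpu_context (which is what REG_PTR() relies on); insn, vcpu and result stand for values already in scope in the callers above:

ulong *gprs = (ulong *)&vcpu->arch.guest_context;

ulong rd = (insn >> SH_RD) & MASK_RX;	/* 5-bit rd field */
gprs[rd] = result;			/* what SET_RD() boils down to */

ulong rs1 = (insn >> SH_RS1) & MASK_RX;
ulong rs1_val = gprs[rs1];		/* what GET_RS1() boils down to */

long imm_i = (s32)insn >> 20;		/* IMM_I(): bits [31:20], sign-extended */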
140 * 2) Returns 0 for exit to user-space
157 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_illegal_insn()
173 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_virtual_insn()
184 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
199 vcpu->stat.wfi_exit_stat++; in wfi_insn()
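The only WFI work visible above is bumping the exit statistic; the remainder of the emulation is putting the vCPU to sleep until it becomes runnable again. A sketch of that shape using the generic KVM halt helpers, not necessarily the exact code in this file:

static void wfi_emulate_sketch(struct kvm_vcpu *vcpu)
{
	vcpu->stat.wfi_exit_stat++;
	/* Block the vCPU until an interrupt or other wake-up is pending. */
	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);
}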
222 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
223 * emulation or in-kernel emulation
234 if (vcpu->arch.csr_decode.return_handled) in kvm_riscv_vcpu_csr_return()
236 vcpu->arch.csr_decode.return_handled = 1; in kvm_riscv_vcpu_csr_return()
239 insn = vcpu->arch.csr_decode.insn; in kvm_riscv_vcpu_csr_return()
241 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_csr_return()
242 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
245 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in kvm_riscv_vcpu_csr_return()
255 ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context); in csr_insn()
262 wr_mask = -1UL; in csr_insn()
267 new_val = -1UL; in csr_insn()
274 wr_mask = -1UL; in csr_insn()
279 new_val = -1UL; in csr_insn()
290 vcpu->arch.csr_decode.insn = insn; in csr_insn()
291 vcpu->arch.csr_decode.return_handled = 0; in csr_insn()
294 run->riscv_csr.csr_num = csr_num; in csr_insn()
295 run->riscv_csr.new_value = new_val; in csr_insn()
296 run->riscv_csr.write_mask = wr_mask; in csr_insn()
297 run->riscv_csr.ret_value = 0; in csr_insn()
299 /* Find in-kernel CSR function */ in csr_insn()
302 if ((tcfn->base <= csr_num) && in csr_insn()
303 (csr_num < (tcfn->base + tcfn->count))) { in csr_insn()
309 /* First try in-kernel CSR emulation */ in csr_insn()
310 if (cfn && cfn->func) { in csr_insn()
311 rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask); in csr_insn()
314 run->riscv_csr.ret_value = val; in csr_insn()
315 vcpu->stat.csr_exit_kernel++; in csr_insn()
323 /* Exit to user-space for CSR emulation */ in csr_insn()
325 vcpu->stat.csr_exit_user++; in csr_insn()
326 run->exit_reason = KVM_EXIT_RISCV_CSR; in csr_insn()
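The wr_mask/new_val assignments above come from a funct3 decode of the six Zicsr forms: the register variants take their operand from rs1, the immediate variants from the 5-bit rs1 field itself, and the set/clear forms turn that operand into a write mask. A condensed sketch of that decode (variable names follow the fragments above; zimm stands for the immediate taken from the rs1 field):

switch (GET_FUNCT3(insn)) {
case 1: /* CSRRW  */ new_val = rs1_val; wr_mask = -1UL;    break;
case 2: /* CSRRS  */ new_val = -1UL;    wr_mask = rs1_val; break;
case 3: /* CSRRC  */ new_val = 0;       wr_mask = rs1_val; break;
case 5: /* CSRRWI */ new_val = zimm;    wr_mask = -1UL;    break;
case 6: /* CSRRSI */ new_val = -1UL;    wr_mask = zimm;    break;
case 7: /* CSRRCI */ new_val = 0;       wr_mask = zimm;    break;
default:
	return truly_illegal_insn(vcpu, run, insn);
}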
378 if ((insn & ifn->mask) == ifn->match) { in system_opcode_insn()
379 rc = ifn->func(vcpu, run, insn); in system_opcode_insn()
390 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in system_opcode_insn()
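The system_opcode_insn() fragments show a table walk: each entry carries a mask/match pair plus an emulation callback, and the first matching entry handles the instruction before sepc is advanced. The shape of such a table, with struct and array names chosen here only for illustration:

struct system_insn_func {
	unsigned long mask;
	unsigned long match;
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static const struct system_insn_func system_insn_table[] = {
	{ .mask = INSN_MASK_WFI,   .match = INSN_MATCH_WFI,   .func = wfi_insn },
	{ .mask = INSN_MASK_CSRRW, .match = INSN_MATCH_CSRRW, .func = csr_insn },
	/* ... one entry per remaining CSR form ... */
};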
400 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
406 * Returns > 0 to continue run-loop
407 * Returns 0 to exit run-loop and handle in user-space.
408 * Returns < 0 to report failure and exit run-loop
413 unsigned long insn = trap->stval; in kvm_riscv_vcpu_virtual_insn()
419 ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_virtual_insn()
421 ct->sepc, in kvm_riscv_vcpu_virtual_insn()
424 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_virtual_insn()
442 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
449 * Returns > 0 to continue run-loop
450 * Returns 0 to exit run-loop and handle in user-space.
451 * Returns < 0 to report failure and exit run-loop
459 int shift = 0, len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_load() local
461 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_load()
467 * transformed instruction or custom instruction. in kvm_riscv_vcpu_mmio_load()
474 * zero or special value. in kvm_riscv_vcpu_mmio_load()
476 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_load()
480 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_load()
489 len = 4; in kvm_riscv_vcpu_mmio_load()
490 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
492 len = 1; in kvm_riscv_vcpu_mmio_load()
493 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
495 len = 1; in kvm_riscv_vcpu_mmio_load()
496 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
499 len = 8; in kvm_riscv_vcpu_mmio_load()
500 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
502 len = 4; in kvm_riscv_vcpu_mmio_load()
505 len = 2; in kvm_riscv_vcpu_mmio_load()
506 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
508 len = 2; in kvm_riscv_vcpu_mmio_load()
511 len = 8; in kvm_riscv_vcpu_mmio_load()
512 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
516 len = 8; in kvm_riscv_vcpu_mmio_load()
517 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
520 len = 4; in kvm_riscv_vcpu_mmio_load()
521 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
525 len = 4; in kvm_riscv_vcpu_mmio_load()
526 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
528 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_load()
532 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_load()
533 return -EIO; in kvm_riscv_vcpu_mmio_load()
536 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_load()
537 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_load()
538 vcpu->arch.mmio_decode.shift = shift; in kvm_riscv_vcpu_mmio_load()
539 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_load()
540 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_load()
543 run->mmio.is_write = false; in kvm_riscv_vcpu_mmio_load()
544 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_load()
545 run->mmio.len = len; in kvm_riscv_vcpu_mmio_load()
548 if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) { in kvm_riscv_vcpu_mmio_load()
550 memcpy(run->mmio.data, data_buf, len); in kvm_riscv_vcpu_mmio_load()
551 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_load()
557 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_load()
558 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_load()
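When no in-kernel device on KVM_MMIO_BUS claims the access, the load is forwarded to user space as a KVM_EXIT_MMIO exit; the VMM performs the read, places the bytes in run->mmio.data, and calls KVM_RUN again, at which point kvm_riscv_vcpu_mmio_return() (further down) writes the value into the destination register. A rough user-space loop for that protocol, hedged as an illustration rather than any particular VMM's code; device_read() is a stand-in for the VMM's device model and run points at the mmap'ed kvm_run structure:

for (;;) {
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write) {
		/* Satisfy the guest's read, then resume the vCPU. */
		device_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
		continue;	/* next KVM_RUN delivers the data to the guest */
	}
	break;
}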
564 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
571 * Returns > 0 to continue run-loop
572 * Returns 0 to exit run-loop and handle in user-space.
573 * Returns < 0 to report failure and exit run-loop
585 int len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_store() local
587 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_store()
593 * transformed instruction or custom instruction. in kvm_riscv_vcpu_mmio_store()
600 * zero or special value. in kvm_riscv_vcpu_mmio_store()
602 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_store()
606 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_store()
613 data = GET_RS2(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
617 len = 4; in kvm_riscv_vcpu_mmio_store()
619 len = 1; in kvm_riscv_vcpu_mmio_store()
622 len = 8; in kvm_riscv_vcpu_mmio_store()
625 len = 2; in kvm_riscv_vcpu_mmio_store()
628 len = 8; in kvm_riscv_vcpu_mmio_store()
629 data64 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
632 len = 8; in kvm_riscv_vcpu_mmio_store()
633 data64 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
636 len = 4; in kvm_riscv_vcpu_mmio_store()
637 data32 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
640 len = 4; in kvm_riscv_vcpu_mmio_store()
641 data32 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
643 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
647 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_store()
648 return -EIO; in kvm_riscv_vcpu_mmio_store()
651 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_store()
652 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_store()
653 vcpu->arch.mmio_decode.shift = 0; in kvm_riscv_vcpu_mmio_store()
654 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_store()
655 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_store()
658 switch (len) { in kvm_riscv_vcpu_mmio_store()
660 *((u8 *)run->mmio.data) = data8; in kvm_riscv_vcpu_mmio_store()
663 *((u16 *)run->mmio.data) = data16; in kvm_riscv_vcpu_mmio_store()
666 *((u32 *)run->mmio.data) = data32; in kvm_riscv_vcpu_mmio_store()
669 *((u64 *)run->mmio.data) = data64; in kvm_riscv_vcpu_mmio_store()
672 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
676 run->mmio.is_write = true; in kvm_riscv_vcpu_mmio_store()
677 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_store()
678 run->mmio.len = len; in kvm_riscv_vcpu_mmio_store()
682 fault_addr, len, run->mmio.data)) { in kvm_riscv_vcpu_mmio_store()
684 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_store()
690 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_store()
691 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_store()
697 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
698 * or in-kernel IO emulation
710 int len, shift; in kvm_riscv_vcpu_mmio_return() local
712 if (vcpu->arch.mmio_decode.return_handled) in kvm_riscv_vcpu_mmio_return()
715 vcpu->arch.mmio_decode.return_handled = 1; in kvm_riscv_vcpu_mmio_return()
716 insn = vcpu->arch.mmio_decode.insn; in kvm_riscv_vcpu_mmio_return()
718 if (run->mmio.is_write) in kvm_riscv_vcpu_mmio_return()
721 len = vcpu->arch.mmio_decode.len; in kvm_riscv_vcpu_mmio_return()
722 shift = vcpu->arch.mmio_decode.shift; in kvm_riscv_vcpu_mmio_return()
724 switch (len) { in kvm_riscv_vcpu_mmio_return()
726 data8 = *((u8 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
727 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
731 data16 = *((u16 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
732 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
736 data32 = *((u32 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
737 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
741 data64 = *((u64 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
742 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
746 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_return()
751 vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; in kvm_riscv_vcpu_mmio_return()
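kvm_riscv_vcpu_mmio_return() is the completion half of both MMIO paths above: guarded by return_handled so it runs only once per decoded access, it copies the bytes out of run->mmio.data at the recorded length, applies the shift recorded at decode time, writes the result to rd with SET_RD(), and finally advances sepc by the recorded instruction length. A hedged sketch of the len == 1 write-back with the shift written out explicitly (the exact casts used in the file may differ):

data8 = *((u8 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
       (ulong)data8 << shift >> shift);	/* shift == 0 leaves the byte as-is */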