Lines matching defs:hc (uses of struct kvm_hv_hcall *hc in arch/x86/kvm/hyperv.c)

1886 static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
1897 if (hc->fast) {
1902 if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
1906 j = i + hc->consumed_xmm_halves;
1908 data[i] = sse128_hi(hc->xmm[j / 2]);
1910 data[i] = sse128_lo(hc->xmm[j / 2]);
1915 return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
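kvm_hv_get_hc_data() (1886-1915) is the common payload reader: fast hypercalls keep their data in XMM register halves past the ones the fixed header already consumed (hence the bound of 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves at 1902), while slow calls read from guest memory at ingpa + data_offset. A minimal userspace model of the half indexing at 1906-1910; the plain struct stands in for the kernel's sse128_t and is an assumption, not the kernel type:

#include <stdint.h>
#include <stdio.h>

struct sse128 { uint64_t lo, hi; };

static void read_fast_data(const struct sse128 *xmm, int consumed_halves,
                           uint64_t *data, int cnt)
{
        for (int i = 0; i < cnt; i++) {
                int j = i + consumed_halves;

                /* Odd halves are the high 64 bits, even halves the low. */
                data[i] = (j % 2) ? xmm[j / 2].hi : xmm[j / 2].lo;
        }
}

int main(void)
{
        struct sse128 xmm[2] = { { 0x1111, 0x2222 }, { 0x3333, 0x4444 } };
        uint64_t data[3];

        read_fast_data(xmm, 1, data, 3); /* the fixed header used xmm[0].lo */
        for (int i = 0; i < 3; i++)
                printf("data[%d] = 0x%llx\n", i, (unsigned long long)data[i]);
        return 0;
}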
1919 static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
1922 if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
1926 return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
1930 static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
1932 return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
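kvm_get_sparse_vp_set() (1919-1926) and kvm_hv_get_tlb_flush_entries() (1930-1932) are thin wrappers over kvm_hv_get_hc_data(): the former caps the variable header at HV_MAX_SPARSE_VCPU_BANKS bank qwords, the latter pulls hc->rep_cnt GVA entries. The sparse VP-set convention those banks use, one valid_bank_mask bit per 64-VP bank with the bank payloads compacted in rank order, is what the var_cnt == hweight64(valid_bank_mask) checks at 2103 and 2275 enforce. A hedged standalone sketch of that convention (vp_in_sparse_set() is a hypothetical name, not the kernel helper; popcount via a GCC/Clang builtin):

#include <stdbool.h>
#include <stdint.h>

#define HV_MAX_SPARSE_VCPU_BANKS 64   /* 64 banks x 64 VPs = 4096 VPs */

static bool vp_in_sparse_set(uint64_t valid_bank_mask,
                             const uint64_t sparse_banks[], uint32_t vp_index)
{
        uint32_t bank = vp_index / 64, bit = vp_index % 64;
        int slot;

        if (bank >= HV_MAX_SPARSE_VCPU_BANKS ||
            !(valid_bank_mask & (1ULL << bank)))
                return false;
        /* Payloads are compacted: a bank's entry sits at its rank
         * among the set bits below it in valid_bank_mask. */
        slot = __builtin_popcountll(valid_bank_mask & ((1ULL << bank) - 1));
        return sparse_banks[slot] & (1ULL << bit);
}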
2008 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2044 if (!hc->fast && is_guest_mode(vcpu)) {
2045 hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
2046 if (unlikely(hc->ingpa == INVALID_GPA))
2050 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
2051 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
2052 if (hc->fast) {
2053 flush.address_space = hc->ingpa;
2054 flush.flags = hc->outgpa;
2055 flush.processor_mask = sse128_lo(hc->xmm[0]);
2056 hc->consumed_xmm_halves = 1;
2058 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
2061 hc->data_offset = sizeof(flush);
2081 if (hc->fast) {
2082 flush_ex.address_space = hc->ingpa;
2083 flush_ex.flags = hc->outgpa;
2085 &hc->xmm[0], sizeof(hc->xmm[0]));
2086 hc->consumed_xmm_halves = 2;
2088 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
2091 hc->data_offset = sizeof(flush_ex);
2103 if (hc->var_cnt != hweight64(valid_bank_mask))
2107 if (!hc->var_cnt)
2110 if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
2121 if (hc->fast)
2122 hc->consumed_xmm_halves += hc->var_cnt;
2124 hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
2127 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
2128 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
2129 hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
2132 if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
2145 tlb_flush_entries, hc->rep_cnt);
2158 tlb_flush_entries, hc->rep_cnt);
2191 tlb_flush_entries, hc->rep_cnt);
2200 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
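In kvm_hv_flush_tlb() (2008-2200) the fixed header comes from ingpa/outgpa/xmm[0] on the fast path (2052-2056, 2082-2086) or a kvm_read_guest() on the slow path, and the function keeps a cursor, consumed_xmm_halves or data_offset, so the later variable-header and rep-list reads resume in the right place (2121-2124). A sketch of that bookkeeping plus the rep-completed result packing at 2200, assuming the TLFS offset value behind the kernel define:

#include <stdbool.h>
#include <stdint.h>

#define HV_HYPERCALL_REP_COMP_OFFSET 32
#define HV_STATUS_SUCCESS            0

struct hc_cursor {
        unsigned int consumed_xmm_halves; /* fast: 64-bit halves used so far */
        unsigned int data_offset;         /* slow: bytes past hc->ingpa */
};

/* After the fixed header, skip var_cnt sparse-bank qwords so the rep
 * list of GVAs is read from the right offset (cf. 2121-2124). */
static void skip_sparse_banks(struct hc_cursor *c, bool fast, unsigned int var_cnt)
{
        if (fast)
                c->consumed_xmm_halves += var_cnt;
        else
                c->data_offset += var_cnt * sizeof(uint64_t);
}

/* Success result for a rep hypercall: the completed rep count is
 * folded in above the 16-bit status code, as at 2200. */
static uint64_t hv_rep_result(unsigned int reps_done)
{
        return HV_STATUS_SUCCESS |
               ((uint64_t)reps_done << HV_HYPERCALL_REP_COMP_OFFSET);
}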
2224 static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2238 if (hc->code == HVCALL_SEND_IPI) {
2239 if (!hc->fast) {
2240 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
2247 if (unlikely(hc->ingpa >> 32 != 0))
2249 sparse_banks[0] = hc->outgpa;
2250 vector = (u32)hc->ingpa;
2257 if (!hc->fast) {
2258 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
2262 send_ipi_ex.vector = (u32)hc->ingpa;
2263 send_ipi_ex.vp_set.format = hc->outgpa;
2264 send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
2275 if (hc->var_cnt != hweight64(valid_bank_mask))
2281 if (!hc->var_cnt)
2284 if (!hc->fast)
2285 hc->data_offset = offsetof(struct hv_send_ipi_ex,
2288 hc->consumed_xmm_halves = 1;
2290 if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
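kvm_hv_send_ipi() (2224-2290) accepts both forms: the plain HVCALL_SEND_IPI fast call packs the vector into the low 32 bits of the first parameter and the target mask into the second (2247-2250), while the HVCALL_SEND_IPI_EX fast call splits hv_send_ipi_ex across ingpa, outgpa and xmm[0] (2262-2264). A sketch of the plain fast-call decode; the struct and function names are hypothetical:

#include <stdbool.h>
#include <stdint.h>

struct fast_ipi { uint32_t vector; uint64_t cpu_mask; };

static bool decode_fast_ipi(uint64_t ingpa, uint64_t outgpa, struct fast_ipi *ipi)
{
        if (ingpa >> 32)        /* reserved upper bits must be zero (2247) */
                return false;
        ipi->vector = (uint32_t)ingpa;
        ipi->cpu_mask = outgpa; /* sparse_banks[0] in the kernel (2249) */
        return true;
}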
2414 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2419 if (unlikely(!hc->fast)) {
2421 gpa_t gpa = hc->ingpa;
2423 if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2424 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2428 &hc->ingpa, sizeof(hc->ingpa));
2438 if (hc->ingpa & 0xffff00000000ULL)
2441 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2446 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
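kvm_hvcall_signal_event() (2414-2446) first dereferences the parameter for slow calls, requiring it to be naturally aligned and not to straddle a page (2423-2424), then validates the value itself: bits 32-47 carry a flag number KVM requires to be zero (2438), and the connection ID must fit KVM_HYPERV_CONN_ID_MASK before the idr lookup (2441-2446). A hedged model of those checks; the mask value is assumed to match the kernel define:

#include <stdbool.h>
#include <stdint.h>

#define KVM_HYPERV_CONN_ID_MASK 0x00ffffffULL

static bool signal_event_param_ok(uint64_t param, uint32_t *conn_id)
{
        if (param & 0xffff00000000ULL)        /* nonzero flag number */
                return false;
        if (param & ~KVM_HYPERV_CONN_ID_MASK) /* ID out of range */
                return false;
        *conn_id = (uint32_t)param;
        return true;
}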
2455 static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2457 switch (hc->code) {
2469 static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2475 _kvm_read_sse_reg(reg, &hc->xmm[reg]);
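Only a fixed set of hypercalls may use the XMM fast-call form; when one arrives, kvm_hv_hypercall_read_xmm() (2469-2475) snapshots all HV_HYPERCALL_MAX_XMM_REGISTERS registers up front so the readers above can index halves uniformly. A sketch of the gate, assuming the code values from hyperv-tlfs.h match the cases in the kernel's is_xmm_fast_hypercall() switch:

#include <stdbool.h>
#include <stdint.h>

#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE    0x0002
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST     0x0003
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX  0x0014
#define HVCALL_SEND_IPI_EX                    0x0015

static bool is_xmm_fast_hypercall(uint16_t code)
{
        switch (code) {
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
        case HVCALL_SEND_IPI_EX:
                return true;
        default:
                return false;
        }
}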
2532 struct kvm_hv_hcall hc;
2546 hc.param = kvm_rcx_read(vcpu);
2547 hc.ingpa = kvm_rdx_read(vcpu);
2548 hc.outgpa = kvm_r8_read(vcpu);
2552 hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2554 hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2556 hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2560 hc.code = hc.param & 0xffff;
2561 hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
2562 hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2563 hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2564 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2565 hc.rep = !!(hc.rep_cnt || hc.rep_idx);
2567 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
2568 hc.rep_idx, hc.ingpa, hc.outgpa);
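The dispatcher assembles struct kvm_hv_hcall from the ABI registers, RCX/RDX/R8 in 64-bit mode or the EDX:EAX, EBX:ECX and EDI:ESI pairs in 32-bit mode (2546-2556), then cracks the input value into bitfields (2560-2565). A standalone decoder for that layout, field offsets per the TLFS (code 15:0, fast bit 16, variable header size 26:17, rep count 43:32, rep start index 59:48):

#include <stdbool.h>
#include <stdint.h>

struct hv_hcall_input {
        uint16_t code;
        bool     fast;
        uint16_t var_cnt;
        uint16_t rep_cnt, rep_idx;
        bool     rep;
};

static struct hv_hcall_input decode_hcall(uint64_t param)
{
        struct hv_hcall_input in = {
                .code    = param & 0xffff,
                .fast    = !!(param & (1ULL << 16)),
                .var_cnt = (param >> 17) & 0x3ff,
                .rep_cnt = (param >> 32) & 0xfff,
                .rep_idx = (param >> 48) & 0xfff,
        };
        in.rep = in.rep_cnt || in.rep_idx;
        return in;
}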
2570 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
2575 if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
2580 if (hc.fast && is_xmm_fast_hypercall(&hc)) {
2588 kvm_hv_hypercall_read_xmm(&hc);
2591 switch (hc.code) {
2593 if (unlikely(hc.rep || hc.var_cnt)) {
2600 if (unlikely(hc.rep || hc.var_cnt)) {
2604 ret = kvm_hvcall_signal_event(vcpu, &hc);
2610 if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
2616 if (unlikely(hc.var_cnt)) {
2622 if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2626 ret = kvm_hv_flush_tlb(vcpu, &hc);
2629 if (unlikely(hc.var_cnt)) {
2635 if (unlikely(hc.rep)) {
2639 ret = kvm_hv_flush_tlb(vcpu, &hc);
2642 if (unlikely(hc.var_cnt)) {
2648 if (unlikely(hc.rep)) {
2652 ret = kvm_hv_send_ipi(vcpu, &hc);
2656 if (unlikely(hc.fast)) {
2676 if (unlikely(hc.fast)) {
2692 vcpu->run->hyperv.u.hcall.input = hc.param;
2693 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2694 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
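Hypercalls KVM does not complete in-kernel are forwarded to the VMM via KVM_EXIT_HYPERV with the raw input value and both parameters copied into kvm_run (2692-2694). A hedged sketch of the userspace side, assuming the KVM_EXIT_HYPERV_HCALL exit and struct kvm_run layout from <linux/kvm.h>; handle_hcall() is a hypothetical VMM function, not part of KVM:

#include <linux/kvm.h>
#include <stdint.h>

/* Hypothetical VMM-side emulation; returns a Hyper-V status code. */
static uint64_t handle_hcall(uint64_t input, uint64_t p0, uint64_t p1)
{
        (void)input; (void)p0; (void)p1;
        return 0; /* HV_STATUS_SUCCESS */
}

static void handle_hyperv_exit(struct kvm_run *run)
{
        if (run->exit_reason == KVM_EXIT_HYPERV &&
            run->hyperv.type == KVM_EXIT_HYPERV_HCALL)
                run->hyperv.u.hcall.result =
                        handle_hcall(run->hyperv.u.hcall.input,
                                     run->hyperv.u.hcall.params[0],
                                     run->hyperv.u.hcall.params[1]);
}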