Searched refs:seg (Results 1 – 25 of 35) sorted by relevance

/qemu/target/i386/hvf/
x86_descr.c
24 #define VMX_SEGMENT_FIELD(seg) \ argument
25 [R_##seg] = { \
26 .selector = VMCS_GUEST_##seg##_SELECTOR, \
27 .base = VMCS_GUEST_##seg##_BASE, \
28 .limit = VMCS_GUEST_##seg##_LIMIT, \
29 .ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS, \
48 uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg) in vmx_read_segment_limit() argument
50 return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit); in vmx_read_segment_limit()
53 uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg) in vmx_read_segment_ar() argument
55 return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes); in vmx_read_segment_ar()
[all …]
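The matches above come from a token-pasting table: VMX_SEGMENT_FIELD(seg) expands one register name into the set of VMCS_GUEST_##seg##_* field encodings, and accessors such as vmx_read_segment_limit() index that table and pass the encoding to rvmcs(). A minimal standalone sketch of the same pattern, assuming made-up enum values and field encodings (not the real VMCS numbers):

    #include <stdint.h>
    #include <stdio.h>

    enum seg_reg { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS, SEG_COUNT };

    /* Placeholder encodings standing in for the real VMCS_GUEST_*_ constants. */
    #define VMCS_GUEST_CS_SELECTOR 0x0802
    #define VMCS_GUEST_CS_BASE     0x6808
    #define VMCS_GUEST_CS_LIMIT    0x4802

    struct seg_fields {
        uint32_t selector;
        uint32_t base;
        uint32_t limit;
    };

    /* Token pasting turns one register name into the related field constants,
     * the same trick VMX_SEGMENT_FIELD(seg) uses in x86_descr.c. */
    #define SEGMENT_FIELD(s)                            \
        [R_##s] = {                                     \
            .selector = VMCS_GUEST_##s##_SELECTOR,      \
            .base     = VMCS_GUEST_##s##_BASE,          \
            .limit    = VMCS_GUEST_##s##_LIMIT,         \
        }

    static const struct seg_fields seg_fields[SEG_COUNT] = {
        SEGMENT_FIELD(CS),
    };

    int main(void)
    {
        printf("CS limit field encoding: 0x%x\n", (unsigned)seg_fields[R_CS].limit);
        return 0;
    }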
x86_descr.h
33 struct vmx_segment *desc, enum X86Seg seg);
35 enum X86Seg seg);
38 enum X86Seg seg);
41 enum X86Seg seg);
43 uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg);
44 void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg,
52 uint32_t vmx_read_segment_limit(CPUState *cpu, enum X86Seg seg);
53 uint32_t vmx_read_segment_ar(CPUState *cpu, enum X86Seg seg);
x86hvf.c
88 struct vmx_segment seg; in hvf_put_segments() local
104 hvf_set_segment(cs, &seg, &env->segs[R_CS], false); in hvf_put_segments()
105 vmx_write_segment_descriptor(cs, &seg, R_CS); in hvf_put_segments()
107 hvf_set_segment(cs, &seg, &env->segs[R_DS], false); in hvf_put_segments()
108 vmx_write_segment_descriptor(cs, &seg, R_DS); in hvf_put_segments()
110 hvf_set_segment(cs, &seg, &env->segs[R_ES], false); in hvf_put_segments()
111 vmx_write_segment_descriptor(cs, &seg, R_ES); in hvf_put_segments()
113 hvf_set_segment(cs, &seg, &env->segs[R_SS], false); in hvf_put_segments()
114 vmx_write_segment_descriptor(cs, &seg, R_SS); in hvf_put_segments()
116 hvf_set_segment(cs, &seg, &env->segs[R_FS], false); in hvf_put_segments()
[all …]
x86.c
161 target_ulong linear_addr(CPUState *cpu, target_ulong addr, X86Seg seg) in linear_addr() argument
163 return vmx_read_segment_base(cpu, seg) + addr; in linear_addr()
167 X86Seg seg) in linear_addr_size() argument
179 return linear_addr(cpu, addr, seg); in linear_addr_size()
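linear_addr() above performs the basic segmented-to-linear translation: the effective address is added to the segment base fetched with vmx_read_segment_base(). A self-contained sketch of that step, with a stubbed base lookup standing in for the VMCS read (values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum seg_reg { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS, SEG_COUNT };

    /* Stand-in for vmx_read_segment_base(); in HVF this is an rvmcs() read of
     * the per-segment base field. */
    static uint64_t segment_base(enum seg_reg seg)
    {
        static const uint64_t bases[SEG_COUNT] = { [R_DS] = 0x10000, [R_SS] = 0x20000 };
        return bases[seg];
    }

    /* Segmented-to-linear translation: linear = segment base + effective address. */
    static uint64_t linear_addr(uint64_t addr, enum seg_reg seg)
    {
        return segment_base(seg) + addr;
    }

    int main(void)
    {
        printf("DS:0x1234 -> linear 0x%llx\n",
               (unsigned long long)linear_addr(0x1234, R_DS));
        return 0;
    }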
/qemu/hw/ssi/
aspeed_smc.c
239 AspeedSegments seg; in aspeed_smc_flash_overlap() local
247 asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); in aspeed_smc_flash_overlap()
249 if (new->addr + new->size > seg.addr && in aspeed_smc_flash_overlap()
250 new->addr < seg.addr + seg.size) { in aspeed_smc_flash_overlap()
255 i, seg.addr, seg.addr + seg.size); in aspeed_smc_flash_overlap()
267 AspeedSegments seg; in aspeed_smc_flash_set_segment_region() local
269 asc->reg_to_segment(s, regval, &seg); in aspeed_smc_flash_set_segment_region()
272 memory_region_set_size(&fl->mmio, seg.size); in aspeed_smc_flash_set_segment_region()
273 memory_region_set_address(&fl->mmio, seg.addr - asc->flash_window_base); in aspeed_smc_flash_set_segment_region()
274 memory_region_set_enabled(&fl->mmio, !!seg.size); in aspeed_smc_flash_set_segment_region()
[all …]
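The aspeed_smc_flash_overlap() match checks whether a newly programmed flash segment intersects an existing one: two half-open ranges [addr, addr + size) overlap when each starts before the other ends. A standalone sketch of that interval test with simplified types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct segment {
        uint64_t addr;
        uint64_t size;
    };

    /* Half-open interval overlap, mirroring the condition in the excerpt:
     * new->addr + new->size > seg.addr && new->addr < seg.addr + seg.size. */
    static bool segments_overlap(const struct segment *a, const struct segment *b)
    {
        return a->addr + a->size > b->addr && a->addr < b->addr + b->size;
    }

    int main(void)
    {
        struct segment existing  = { .addr = 0x20000000, .size = 0x10000000 };
        struct segment candidate = { .addr = 0x28000000, .size = 0x08000000 };

        printf("overlap: %s\n", segments_overlap(&candidate, &existing) ? "yes" : "no");
        return 0;
    }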
/qemu/hw/xen/
xen-operations.c
52 XenGrantCopySegment *seg = &segs[i]; in libxengnttab_fallback_grant_copy() local
54 refs[i] = to_domain ? seg->dest.foreign.ref : in libxengnttab_fallback_grant_copy()
55 seg->source.foreign.ref; in libxengnttab_fallback_grant_copy()
68 XenGrantCopySegment *seg = &segs[i]; in libxengnttab_fallback_grant_copy() local
72 memcpy(page + seg->dest.foreign.offset, seg->source.virt, in libxengnttab_fallback_grant_copy()
73 seg->len); in libxengnttab_fallback_grant_copy()
75 memcpy(seg->dest.virt, page + seg->source.foreign.offset, in libxengnttab_fallback_grant_copy()
76 seg->len); in libxengnttab_fallback_grant_copy()
106 XenGrantCopySegment *seg = &segs[i]; in libxengnttab_backend_grant_copy() local
112 xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; in libxengnttab_backend_grant_copy()
[all …]
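Both grant-copy paths above choose the memcpy() direction per segment from a to_domain flag: copying into the domain writes the mapped page at the destination's foreign offset from a local virtual buffer, copying out of it reads the page at the source's foreign offset. A hedged sketch of that dispatch; the struct below is a simplified stand-in for XenGrantCopySegment:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for one XenGrantCopySegment. */
    struct copy_seg {
        struct { void *virt; size_t foreign_offset; } source, dest;
        size_t len;
    };

    /* 'page' is the locally mapped granted page; the direction flag decides
     * whether the foreign page is the destination or the source. */
    static void copy_one_segment(struct copy_seg *seg, unsigned char *page, bool to_domain)
    {
        if (to_domain) {
            memcpy(page + seg->dest.foreign_offset, seg->source.virt, seg->len);
        } else {
            memcpy(seg->dest.virt, page + seg->source.foreign_offset, seg->len);
        }
    }

    int main(void)
    {
        unsigned char page[4096] = { 0 };
        char payload[] = "hello";
        struct copy_seg seg = {
            .source = { .virt = payload },
            .dest   = { .foreign_offset = 16 },
            .len    = sizeof(payload),
        };

        copy_one_segment(&seg, page, true);   /* local buffer -> granted page */
        printf("page+16: %s\n", page + 16);
        return 0;
    }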
/qemu/hw/block/
xen_blkif.h
28 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; member
54 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; member
115 dst->seg[i] = src->seg[i]; in blkif_get_x86_32_req()
141 dst->seg[i] = src->seg[i]; in blkif_get_x86_64_req()
/qemu/hw/net/
virtio-net.c
2119 VirtioNetRscSeg *seg) in virtio_net_rsc_drain_seg() argument
2124 h = (struct virtio_net_hdr_v1 *)seg->buf; in virtio_net_rsc_drain_seg()
2128 if (seg->is_coalesced) { in virtio_net_rsc_drain_seg()
2129 h->rsc.segments = seg->packets; in virtio_net_rsc_drain_seg()
2130 h->rsc.dup_acks = seg->dup_ack; in virtio_net_rsc_drain_seg()
2139 ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size); in virtio_net_rsc_drain_seg()
2140 QTAILQ_REMOVE(&chain->buffers, seg, next); in virtio_net_rsc_drain_seg()
2141 g_free(seg->buf); in virtio_net_rsc_drain_seg()
2142 g_free(seg); in virtio_net_rsc_drain_seg()
2149 VirtioNetRscSeg *seg, *rn; in virtio_net_rsc_purge() local
[all …]
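virtio_net_rsc_drain_seg() above flushes one coalesced receive segment: if packets were actually merged, the segment and duplicate-ACK counters are written into the virtio-net header at the start of the buffer, the buffer goes to the normal receive path, and the entry is removed from the chain and freed. A minimal sketch of the header-patching and delivery part (chain bookkeeping omitted), with a plain callback in place of virtio_net_do_receive() and simplified types:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified header: only the coalescing counters matter for this sketch. */
    struct rsc_hdr {
        uint16_t segments;
        uint16_t dup_acks;
    };

    /* One coalesced receive segment: a buffer starting with the header above. */
    struct rsc_seg {
        uint8_t *buf;
        size_t   size;
        uint16_t packets;     /* packets merged into this segment */
        uint16_t dup_ack;
        int      is_coalesced;
    };

    typedef int (*deliver_fn)(const uint8_t *buf, size_t size);

    /* Patch the counters into the in-buffer header (if anything was merged),
     * deliver the buffer, then release the segment. */
    static int drain_seg(struct rsc_seg *seg, deliver_fn deliver)
    {
        if (seg->is_coalesced) {
            struct rsc_hdr *h = (struct rsc_hdr *)seg->buf;
            h->segments = seg->packets;
            h->dup_acks = seg->dup_ack;
        }
        int ret = deliver(seg->buf, seg->size);
        free(seg->buf);
        free(seg);
        return ret;
    }

    static int print_deliver(const uint8_t *buf, size_t size)
    {
        const struct rsc_hdr *h = (const struct rsc_hdr *)buf;
        printf("delivered %zu bytes, %u merged packets\n", size, (unsigned)h->segments);
        return 0;
    }

    int main(void)
    {
        struct rsc_seg *seg = calloc(1, sizeof(*seg));
        seg->size = sizeof(struct rsc_hdr) + 64;
        seg->buf = calloc(1, seg->size);
        seg->packets = 3;
        seg->is_coalesced = 1;
        return drain_seg(seg, print_deliver);
    }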
/qemu/hw/i386/kvm/
xen_gnttab.c
464 XenGrantCopySegment *seg = &segs[i]; in xen_be_gnttab_copy() local
466 uint32_t ref = to_domain ? seg->dest.foreign.ref : in xen_be_gnttab_copy()
467 seg->source.foreign.ref; in xen_be_gnttab_copy()
479 memcpy(page + seg->dest.foreign.offset, seg->source.virt, in xen_be_gnttab_copy()
480 seg->len); in xen_be_gnttab_copy()
482 memcpy(seg->dest.virt, page + seg->source.foreign.offset, in xen_be_gnttab_copy()
483 seg->len); in xen_be_gnttab_copy()
/qemu/hw/block/dataplane/
xen-block.c
185 if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) { in xen_block_parse_request()
189 if (request->req.seg[i].last_sect * dataplane->sector_size >= in xen_block_parse_request()
195 len = (request->req.seg[i].last_sect - in xen_block_parse_request()
196 request->req.seg[i].first_sect + 1) * dataplane->sector_size; in xen_block_parse_request()
228 segs[i].dest.foreign.ref = request->req.seg[i].gref; in xen_block_copy_request()
229 segs[i].dest.foreign.offset = request->req.seg[i].first_sect * in xen_block_copy_request()
233 segs[i].source.foreign.ref = request->req.seg[i].gref; in xen_block_copy_request()
234 segs[i].source.foreign.offset = request->req.seg[i].first_sect * in xen_block_copy_request()
238 segs[i].len = (request->req.seg[i].last_sect - in xen_block_copy_request()
239 request->req.seg[i].first_sect + 1) * in xen_block_copy_request()
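xen_block_parse_request() above validates each segment's sector range and converts it to a byte count: first_sect must not exceed last_sect, last_sect must stay inside the granted page (the upper bound is truncated in the excerpt and assumed to be the page size here), and the length is (last_sect - first_sect + 1) * sector_size. A standalone sketch of that per-segment check:

    #include <stdint.h>
    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u

    struct blk_seg {
        uint8_t first_sect;
        uint8_t last_sect;
    };

    /* Returns the segment length in bytes, or 0 if the segment is malformed. */
    static uint32_t segment_length(const struct blk_seg *seg, uint32_t sector_size)
    {
        if (seg->first_sect > seg->last_sect) {
            return 0;                                     /* reversed range */
        }
        if ((uint32_t)seg->last_sect * sector_size >= XEN_PAGE_SIZE) {
            return 0;                                     /* runs past the granted page */
        }
        return (uint32_t)(seg->last_sect - seg->first_sect + 1) * sector_size;
    }

    int main(void)
    {
        struct blk_seg ok  = { .first_sect = 0, .last_sect = 7 };
        struct blk_seg bad = { .first_sect = 5, .last_sect = 2 };

        printf("ok:  %u bytes\n", segment_length(&ok, 512));
        printf("bad: %u bytes\n", segment_length(&bad, 512));
        return 0;
    }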
/qemu/tests/tcg/i386/
test-i386.c
1260 #define TEST_LR(op, size, seg, mask)\ argument
1263 uint16_t mseg = seg;\
1298 uint16_t seg; in test_segs() member
1363 segoff.seg = MK_SEL(2); in test_segs()
1463 uint16_t seg; in test_misc()
1476 desc.seg = cs_sel; in test_misc()
1591 static inline uint8_t *seg_to_linear(unsigned int seg, unsigned int reg) in seg_to_linear() argument
1593 return (uint8_t *)((seg << 4) + (reg & 0xffff)); in seg_to_linear()
1618 int seg, ret; in test_vm86() local
1633 seg = VM86_CODE_CS; in test_vm86()
[all …]
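seg_to_linear() in the vm86 test above applies the classic real-mode address formula: the 16-bit segment shifted left by four, plus the offset wrapped to 16 bits. A tiny standalone version:

    #include <stdint.h>
    #include <stdio.h>

    /* Real-mode (and vm86) address translation: linear = (segment << 4) + offset,
     * with the offset wrapped to 16 bits. */
    static uint32_t seg_to_linear(uint16_t seg, uint32_t off)
    {
        return ((uint32_t)seg << 4) + (off & 0xffff);
    }

    int main(void)
    {
        /* 0xB800:0x0010 -> 0xB8010, the start of CGA text memory plus 16 bytes. */
        printf("0x%05x\n", seg_to_linear(0xB800, 0x0010));
        return 0;
    }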
/qemu/hw/usb/
xen-usb.c
148 struct usbif_request_segment *seg; in usbback_gnttab_map() local
163 if ((unsigned)usbback_req->req.seg[i].offset + in usbback_gnttab_map()
164 (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) { in usbback_gnttab_map()
176 ref[i] = usbback_req->req.seg[i].gref; in usbback_gnttab_map()
187 seg = usbback_req->req.seg + i; in usbback_gnttab_map()
188 addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset; in usbback_gnttab_map()
189 qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length); in usbback_gnttab_map()
210 ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref; in usbback_gnttab_map()
300 ref[i] = usbback_req->req.seg[i].gref; in usbback_do_response()
309 ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref; in usbback_do_response()
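usbback_gnttab_map() above rejects any segment whose offset plus length would cross its granted page, then appends buffer + i * page_size + offset with the segment's length to the request's iovec. A hedged sketch of that loop; the segment struct is a simplified stand-in for usbif_request_segment and the page size is assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/uio.h>

    #define XEN_PAGE_SIZE 4096u

    /* Simplified stand-in for struct usbif_request_segment. */
    struct usb_seg {
        uint16_t offset;
        uint16_t length;
    };

    /* Fill 'iov' with one entry per segment; segment i lives inside the i-th
     * page of 'buffer'. Returns the entry count, or -1 on a bad segment. */
    static int build_iov(struct iovec *iov, uint8_t *buffer,
                         const struct usb_seg *segs, int nr_segs)
    {
        for (int i = 0; i < nr_segs; i++) {
            if ((unsigned)segs[i].offset + segs[i].length > XEN_PAGE_SIZE) {
                return -1;  /* segment would spill out of its granted page */
            }
            iov[i].iov_base = buffer + i * XEN_PAGE_SIZE + segs[i].offset;
            iov[i].iov_len = segs[i].length;
        }
        return nr_segs;
    }

    int main(void)
    {
        static uint8_t buffer[2 * XEN_PAGE_SIZE];
        struct usb_seg segs[2] = { { .offset = 0, .length = 512 },
                                   { .offset = 128, .length = 64 } };
        struct iovec iov[2];

        int n = build_iov(iov, buffer, segs, 2);
        printf("%d iovec entries, first len %zu\n", n, iov[0].iov_len);
        return 0;
    }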
hcd-xhci.c
805 XHCIEvRingSeg seg; in xhci_er_reset() local
820 if (dma_memory_read(xhci->as, erstba, &seg, sizeof(seg), in xhci_er_reset()
828 le32_to_cpus(&seg.addr_low); in xhci_er_reset()
829 le32_to_cpus(&seg.addr_high); in xhci_er_reset()
830 le32_to_cpus(&seg.size); in xhci_er_reset()
831 if (seg.size < 16 || seg.size > 4096) { in xhci_er_reset()
832 DPRINTF("xhci: invalid value for segment size: %d\n", seg.size); in xhci_er_reset()
836 intr->er_start = xhci_addr64(seg.addr_low, seg.addr_high); in xhci_er_reset()
837 intr->er_size = seg.size; in xhci_er_reset()
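xhci_er_reset() above reads one event ring segment table entry, converts its fields from guest little-endian (le32_to_cpus() in the excerpt), rejects sizes outside 16..4096 TRBs, and assembles the 64-bit ring base from the low and high words. A standalone sketch of the validation and address assembly; the endian conversion is omitted and the struct mirrors only the fields shown:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ev_ring_seg {
        uint32_t addr_low;   /* fields assumed already in host byte order */
        uint32_t addr_high;
        uint32_t size;       /* number of TRBs in this segment */
    };

    /* Combine the two 32-bit halves into one 64-bit DMA address. */
    static uint64_t addr64(uint32_t low, uint32_t high)
    {
        return (uint64_t)high << 32 | low;
    }

    /* Validate a segment the way the excerpt does and report the ring geometry. */
    static bool setup_ring(const struct ev_ring_seg *seg,
                           uint64_t *start, uint32_t *size)
    {
        if (seg->size < 16 || seg->size > 4096) {
            fprintf(stderr, "invalid segment size: %u\n", (unsigned)seg->size);
            return false;
        }
        *start = addr64(seg->addr_low, seg->addr_high);
        *size = seg->size;
        return true;
    }

    int main(void)
    {
        struct ev_ring_seg seg = { .addr_low = 0x1000, .addr_high = 0x1, .size = 256 };
        uint64_t start;
        uint32_t size;

        if (setup_ring(&seg, &start, &size)) {
            printf("ring at 0x%llx, %u TRBs\n", (unsigned long long)start, size);
        }
        return 0;
    }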
/qemu/include/hw/ssi/
aspeed_smc.h
114 const AspeedSegments *seg);
116 AspeedSegments *seg);
/qemu/scripts/
xen-detect.c
75 xengnttab_grant_copy_segment_t* seg = NULL; in main() local
92 xengnttab_grant_copy(xg, 0, seg); in main()
/qemu/pc-bios/optionrom/
linuxboot_dma.c
71 uint32_t seg = (uint32_t)addr >> 4; in set_es() local
72 asm("movl %0, %%es" : : "r"(seg)); in set_es()
/qemu/target/i386/emulate/
x86_decode.c
1617 X86Seg seg = R_DS; in calc_modrm_operand16() local
1637 seg = R_SS; in calc_modrm_operand16()
1641 seg = R_SS; in calc_modrm_operand16()
1651 seg = R_SS; in calc_modrm_operand16()
1661 op->addr = decode_linear_addr(env, decode, (uint16_t)ptr, seg); in calc_modrm_operand16()
1736 X86Seg seg = R_DS; in calc_modrm_operand32() local
1745 ptr += get_sib_val(env, decode, &seg); in calc_modrm_operand32()
1754 seg = R_SS; in calc_modrm_operand32()
1763 op->addr = decode_linear_addr(env, decode, (uint32_t)ptr, seg); in calc_modrm_operand32()
1770 X86Seg seg = R_DS; in calc_modrm_operand64() local
[all …]
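calc_modrm_operand16() and calc_modrm_operand32() above start from a default segment of R_DS and switch to R_SS when the effective address is BP/EBP- or ESP-based, before calling decode_linear_addr(). A small illustration of the 16-bit default-segment rule, not the decoder itself:

    #include <stdio.h>

    enum seg_reg { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS };

    /* 16-bit ModRM addressing: forms that involve BP default to SS, everything
     * else defaults to DS (a segment-override prefix would replace either). */
    static enum seg_reg default_seg16(int mod, int rm)
    {
        switch (rm) {
        case 2:             /* [BP + SI] */
        case 3:             /* [BP + DI] */
            return R_SS;
        case 6:             /* [BP + disp], but mod == 0 means plain [disp16] */
            return mod != 0 ? R_SS : R_DS;
        default:
            return R_DS;
        }
    }

    int main(void)
    {
        printf("mod=1 rm=6 -> %s\n", default_seg16(1, 6) == R_SS ? "SS" : "DS");
        printf("mod=0 rm=6 -> %s\n", default_seg16(0, 6) == R_SS ? "SS" : "DS");
        return 0;
    }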
x86_emu.h
30 enum X86Seg seg);
x86.h
271 target_ulong linear_addr(CPUState *cpu, target_ulong addr, enum X86Seg seg);
273 enum X86Seg seg);
/qemu/linux-user/
flatload.c
503 abi_ulong seg; in load_flt_binary() local
504 seg = libinfo[i].start_data; in load_flt_binary()
506 seg -= 4; in load_flt_binary()
511 seg)) in load_flt_binary()
/qemu/include/hw/xen/interface/
physdev.h
273 uint16_t seg; member
301 uint16_t seg; member
/qemu/include/hw/xen/interface/io/
blkif.h
644 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; member
usbif.h
363 struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; member
/qemu/target/i386/
monitor.c
608 #define SEG(name, seg) \ argument
609 { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
610 { name ".base", offsetof(CPUX86State, segs[seg].base) },\
611 { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
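The SEG() macro above turns one monitor register name into three table entries whose values are offsetof() positions inside CPUX86State, letting the monitor read selector, base and limit generically. A minimal standalone sketch of that offsetof-table idiom with a simplified CPU struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct seg_cache {
        uint16_t selector;
        uint64_t base;
        uint32_t limit;
    };

    struct cpu_state {
        struct seg_cache segs[6];   /* ES, CS, SS, DS, FS, GS */
    };

    struct reg_desc {
        const char *name;
        size_t offset;              /* byte offset of the field inside cpu_state */
    };

    /* One macro call yields the selector, base and limit entries for a segment,
     * following the shape of SEG(name, seg) in monitor.c. */
    #define SEG(name, idx)                                                    \
        { name,          offsetof(struct cpu_state, segs[idx].selector) },    \
        { name ".base",  offsetof(struct cpu_state, segs[idx].base)     },    \
        { name ".limit", offsetof(struct cpu_state, segs[idx].limit)    }

    static const struct reg_desc regs[] = {
        SEG("cs", 1),
        SEG("ds", 3),
    };

    int main(void)
    {
        for (size_t i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
            printf("%-10s at offset %zu\n", regs[i].name, regs[i].offset);
        }
        return 0;
    }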
/qemu/tcg/i386/
tcg-target.c.inc
2003 int seg;
2173 h->seg = 0;
2277 tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo,
2281 tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo,
2290 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
2293 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
2298 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo,
2304 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
2308 tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg,
2313 tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
[all …]
