| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ |
| vmap.c |
    32  u32 vmap = 0;  in nvbios_vmap_table() local
    36  vmap = nvbios_rd32(bios, bit_P.offset + 0x20);  in nvbios_vmap_table()
    37  if (vmap) {  in nvbios_vmap_table()
    38  *ver = nvbios_rd08(bios, vmap + 0);  in nvbios_vmap_table()
    42  *hdr = nvbios_rd08(bios, vmap + 1);  in nvbios_vmap_table()
    43  *cnt = nvbios_rd08(bios, vmap + 3);  in nvbios_vmap_table()
    44  *len = nvbios_rd08(bios, vmap + 2);  in nvbios_vmap_table()
    45  return vmap;  in nvbios_vmap_table()
    60  u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);  in nvbios_vmap_parse() local
    62  switch (!!vmap * *ver) {  in nvbios_vmap_parse()
    [all …]
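The byte reads above pin down a four-byte header for the VBIOS voltage-map table: version at +0, header size at +1, entry length at +2, and entry count at +3 (note that len comes from +2 and cnt from +3). A minimal stand-alone sketch of that parse; the names (struct vmap_hdr, vmap_hdr_parse) are hypothetical, not taken from nouveau:

#include <stdint.h>

/* Mirrors the four header bytes read in nvbios_vmap_table() above. */
struct vmap_hdr {
    uint8_t ver;    /* +0: table version */
    uint8_t hdr;    /* +1: header size in bytes */
    uint8_t len;    /* +2: size of one table entry */
    uint8_t cnt;    /* +3: number of entries */
};

static struct vmap_hdr vmap_hdr_parse(const uint8_t *tbl)
{
    struct vmap_hdr h = { tbl[0], tbl[1], tbl[2], tbl[3] };
    return h;
}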
|
| /linux/drivers/gpu/drm/xe/tests/ |
| xe_migrate.c |
    109  xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));  in test_copy()
    114  retval = xe_map_rd(xe, &remote->vmap, 0, u64);  in test_copy()
    117  retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64);  in test_copy()
    124  xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote));  in test_copy()
    125  xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo));  in test_copy()
    132  retval = xe_map_rd(xe, &bo->vmap, 0, u64);  in test_copy()
    135  retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64);  in test_copy()
    142  xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));  in test_copy()
    143  xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo));  in test_copy()
    149  retval = xe_map_rd(xe, &remote->vmap, 0, u64);  in test_copy()
    [all …]
|
| /linux/drivers/net/ |
| vrf.c |
    110  struct vrf_map vmap;  member
    136  return &nn_vrf->vmap;  in netns_vrf_map()
    182  static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,  in vrf_map_lookup_elem() argument
    189  hash_for_each_possible(vmap->ht, me, hnode, key) {  in vrf_map_lookup_elem()
    197  static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)  in vrf_map_add_elem() argument
    203  hash_add(vmap->ht, &me->hnode, key);  in vrf_map_add_elem()
    211  static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)  in vrf_map_lock() argument
    213  spin_lock(&vmap->vmap_lock);  in vrf_map_lock()
    216  static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)  in vrf_map_unlock() argument
    218  spin_unlock(&vmap->vmap_lock);  in vrf_map_unlock()
    [all …]
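vrf.c's vrf_map is the standard &lt;linux/hashtable.h&gt; pattern: a fixed-size bucket array, hash_add() to insert, hash_for_each_possible() to walk one bucket, and a spinlock around mutation. A self-contained sketch of that pattern under illustrative names (demo_elem, demo_ht, a plain u32 key) rather than vrf.c's actual types:

#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_elem {
    u32 id;
    struct hlist_node hnode;
};

static DEFINE_HASHTABLE(demo_ht, 4);    /* 2^4 buckets */
static DEFINE_SPINLOCK(demo_lock);

/* Caller must hold demo_lock, mirroring vrf_map_lock() above. */
static struct demo_elem *demo_lookup(u32 key)
{
    struct demo_elem *e;

    hash_for_each_possible(demo_ht, e, hnode, key)
        if (e->id == key)
            return e;
    return NULL;
}

static void demo_add(struct demo_elem *e)
{
    spin_lock(&demo_lock);
    hash_add(demo_ht, &e->hnode, e->id);
    spin_unlock(&demo_lock);
}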
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/ |
| base.c |
    87  u32 vmap;  in nvkm_volt_map_min() local
    89  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map_min()
    90  if (vmap) {  in nvkm_volt_map_min()
    109  u32 vmap;  in nvkm_volt_map() local
    111  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map()
    112  if (vmap) {  in nvkm_volt_map()
    295  struct nvbios_vmap vmap;  in nvkm_volt_ctor() local
    301  if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {  in nvkm_volt_ctor()
    302  volt->max0_id = vmap.max0;  in nvkm_volt_ctor()
    303  volt->max1_id = vmap.max1;  in nvkm_volt_ctor()
    [all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_sa.c |
    71  sa_manager->is_iomem = bo->vmap.is_iomem;  in __xe_sa_bo_manager_init()
    73  if (bo->vmap.is_iomem) {  in __xe_sa_bo_manager_init()
    78  sa_manager->cpu_ptr = bo->vmap.vaddr;  in __xe_sa_bo_manager_init()
    118  if (!sa_manager->bo->vmap.is_iomem)  in xe_sa_bo_flush_write()
    121  xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),  in xe_sa_bo_flush_write()
|
| xe_huc.c |
    170  wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,  in huc_auth_via_gsccs()
    172  wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,  in huc_auth_via_gsccs()
    182  if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,  in huc_auth_via_gsccs()
    194  err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,  in huc_auth_via_gsccs()
    206  out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);  in huc_auth_via_gsccs()
|
| xe_gsc.c |
    60  xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);  in memcpy_fw()
    61  xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);  in memcpy_fw()
    62  xe_map_memset(xe, &gsc->private->vmap, fw_size, 0,  in memcpy_fw()
    150  wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0,  in query_compatibility_version()
    152  wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset);  in query_compatibility_version()
    164  err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ,  in query_compatibility_version()
    172  compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);  in query_compatibility_version()
    173  compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);  in query_compatibility_version()
    174  compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);  in query_compatibility_version()
|
| xe_memirq.c |
    194  iosys_map_memset(&bo->vmap, 0, 0, bo_size);  in memirq_alloc_pages()
    197  memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0));  in memirq_alloc_pages()
    198  memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));  in memirq_alloc_pages()
    199  memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);  in memirq_alloc_pages()
    452  struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,  in xe_memirq_hwe_handler()
    457  IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,  in xe_memirq_hwe_handler()
|
| xe_pxp_submit.c |
    157  gsc_res->batch = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);  in allocate_gsc_client_resources()
    158  gsc_res->msg_in = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE);  in allocate_gsc_client_resources()
    159  gsc_res->msg_out = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE + inout_size);  in allocate_gsc_client_resources()
    310  offset = pxp_emit_session_termination(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, id);  in xe_pxp_submit_session_termination()
    311  offset = pxp_emit_wait(pxp->xe, &pxp->vcs_exec.bo->vmap, offset);  in xe_pxp_submit_session_termination()
    312  emit_cmd(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, MI_BATCH_BUFFER_END);  in xe_pxp_submit_session_termination()
|
| xe_lmtt.c |
    84  xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));  in lmtt_pt_alloc()
    292  lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));  in lmtt_write_pte()
    294  xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);  in lmtt_write_pte()
    297  lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));  in lmtt_write_pte()
    299  xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);  in lmtt_write_pte()
|
| xe_bo_types.h |
    51  struct iosys_map vmap;  member
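This member is why the xe hits above all funnel through xe_map_*() and iosys_map_*() helpers: struct iosys_map records whether the buffer object's CPU mapping is I/O memory (VRAM behind a BAR) or plain system memory, and the accessors pick memcpy_toio()/readl()-style access or ordinary loads and stores accordingly. A minimal sketch of the two accessors; the demo_ wrappers are hypothetical:

#include <linux/iosys-map.h>
#include <linux/types.h>

/* Works whether map->is_iomem is set or not. */
static u32 demo_read_dword(struct iosys_map *map, size_t offset)
{
    return iosys_map_rd(map, offset, u32);
}

static void demo_write_dword(struct iosys_map *map, size_t offset, u32 val)
{
    iosys_map_wr(map, offset, u32, val);
}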
|
| /linux/Documentation/translations/zh_CN/core-api/ |
| cachetlb.rst |
    319  set up by the vmap/vmalloc API. Since kernel I/O goes through physical pages, the I/O subsystem assumes that the user
    320  mapping and the kernel offset mapping are the only aliases. This is not true for vmap aliases, so anything in the kernel
    321  that tries to do I/O to vmap areas must manage coherency manually. It must, before doing I/O, flush the vmap
    326  flushes the kernel cache for the given virtual address range in the vmap area. This is to make sure that any data the kernel modified in the vmap range
    332  the cache for a given virtual address range in the vmap area, which prevents the processor from making the cache stale by speculatively reading data while I/O to the physical pages
    333  is occurring. This is only necessary for data read into the vmap area.
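A hedged sketch of the rule this excerpt describes, using the two interfaces the document covers (flush_kernel_vmap_range() and invalidate_kernel_vmap_range()); the helper name and the placement of the I/O submission are illustrative, not from the document:

#include <linux/highmem.h>    /* pulls in the cacheflush interfaces */

static void demo_io_via_vmap_alias(void *vmap_addr, int size)
{
    /* Make kernel writes done through the vmap alias visible to
     * the underlying physical pages before the I/O reads them. */
    flush_kernel_vmap_range(vmap_addr, size);

    /* ... submit I/O against the underlying physical pages ... */

    /* Drop cache lines the CPU may have speculatively filled while
     * the I/O ran; only needed for data read into the vmap area. */
    invalidate_kernel_vmap_range(vmap_addr, size);
}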
|
| /linux/drivers/gpu/drm/xe/display/ |
| xe_hdcp_gsc.c |
    88  xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));  in intel_hdcp_gsc_initialize_message()
    138  struct iosys_map *map = &gsc_context->hdcp_bo->vmap;  in xe_gsc_send_sync()
    180  addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,  in intel_hdcp_gsc_msg_send()
    183  xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,  in intel_hdcp_gsc_msg_send()
    206  xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,  in intel_hdcp_gsc_msg_send()
|
| xe_dsb_buffer.c |
    20  iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);  in intel_dsb_buffer_write()
    25  return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32);  in intel_dsb_buffer_read()
    32  iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);  in intel_dsb_buffer_memset()
|
| intel_fbdev_fb.c |
    107  XE_WARN_ON(iosys_map_is_null(&obj->vmap));  in intel_fbdev_fb_fill_info()
    109  info->screen_base = obj->vmap.vaddr_iomem;  in intel_fbdev_fb_fill_info()
|
| /linux/mm/ |
| vma_exec.c |
    107  int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,  in create_init_stack_vma() argument
    149  *vmap = vma;  in create_init_stack_vma()
    158  *vmap = NULL;  in create_init_stack_vma()
|
| /linux/kernel/dma/ |
| remap.c |
    29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
    53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
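Both call sites above use the core interface itself: vmap() takes an array of struct page pointers and stitches them into one contiguous kernel virtual range, undone later by vunmap(). A minimal sketch with generic flags and protection (VM_MAP, PAGE_KERNEL) rather than remap.c's DMA-specific VM_DMA_COHERENT; the demo_ helpers are illustrative:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *demo_map_pages(struct page **pages, unsigned int count)
{
    /* Returns NULL on failure; the pages themselves need not be
     * physically contiguous, only the resulting virtual range is. */
    return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void demo_unmap_pages(void *vaddr)
{
    vunmap(vaddr);
}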
|
| /linux/tools/testing/selftests/net/ |
| test_vxlan_vnifiltering.sh |
    221  for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
    223  local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
    224  local family=$(echo $vmap | awk -F'-' '{print ($2)}')
    225  local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
    226  local group=$(echo $vmap | awk -F'-' '{print ($4)}')
    227  local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
    228  local port=$(echo $vmap | awk -F'-' '{print ($6)}')
|
| /linux/Documentation/translations/zh_CN/mm/ |
| vmalloced-kernel-stacks.rst |
    48  - Stacks in vmalloc space need to work reliably. For example, if vmap page tables are created on demand, when the stack points to
    91  work. Arch-specific vmap stack allocators take care of this detail.
|
| /linux/drivers/gpu/drm/tiny/ |
| sharp-memory.c |
    129  const struct iosys_map *vmap,  in sharp_memory_set_tx_buffer_data() argument
    143  drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state);  in sharp_memory_set_tx_buffer_data()
    150  const struct iosys_map *vmap,  in sharp_memory_update_display() argument
    166  sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state);  in sharp_memory_update_display()
    209  static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,  in sharp_memory_fb_dirty() argument
    222  sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state);  in sharp_memory_fb_dirty()
|
| /linux/drivers/gpu/drm/sitronix/ |
| st7571-i2c.c |
    104  const struct iosys_map *vmap,
    273  const struct iosys_map *vmap,  in st7571_prepare_buffer_monochrome() argument
    287  drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);  in st7571_prepare_buffer_monochrome()
    292  memcpy(st7571->hwbuf, vmap->vaddr, size);  in st7571_prepare_buffer_monochrome()
    298  const struct iosys_map *vmap,  in st7571_prepare_buffer_grayscale() argument
    312  drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);  in st7571_prepare_buffer_grayscale()
    317  memcpy(st7571->hwbuf, vmap->vaddr, size);  in st7571_prepare_buffer_grayscale()
    322  memcpy(st7571->hwbuf, vmap->vaddr, size);  in st7571_prepare_buffer_grayscale()
|
| st7586.c |
    74  struct iosys_map dst_map, vmap;  in st7586_xrgb8888_to_gray332() local
    81  iosys_map_set_vaddr(&vmap, vaddr);  in st7586_xrgb8888_to_gray332()
    82  drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state);  in st7586_xrgb8888_to_gray332()
|
| /linux/arch/arm/mm/ |
| fault-armv.c |
    253  p1 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
    254  p2 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
|
| /linux/drivers/gpu/drm/etnaviv/ |
| etnaviv_gem.c |
    336  etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);  in etnaviv_gem_vmap()
    365  return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);  in etnaviv_gem_vmap_impl()
    496  .vmap = etnaviv_gem_vmap_impl,
    565  .vmap = etnaviv_gem_prime_vmap,
    729  .vmap = etnaviv_gem_vmap_impl,
|
| /linux/Documentation/features/vm/huge-vmap/ |
| arch-support.txt |
    2  # Feature name: huge-vmap
|