
Searched refs:mapping (Results 1 – 25 of 1151) sorted by relevance


/linux/include/linux/
pagemap.h
21 unsigned long invalidate_mapping_pages(struct address_space *mapping,
30 int invalidate_inode_pages2(struct address_space *mapping);
31 int invalidate_inode_pages2_range(struct address_space *mapping,
35 int filemap_invalidate_pages(struct address_space *mapping,
41 int filemap_fdatawait_keep_errors(struct address_space *mapping);
43 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
48 static inline int filemap_fdatawait(struct address_space *mapping) in filemap_fdatawait() argument
50 return filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait()
54 int filemap_write_and_wait_range(struct address_space *mapping,
56 int __filemap_fdatawrite_range(struct address_space *mapping,
[all …]
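
A minimal usage sketch of the writeback helpers listed above (assumption: example_flush_and_drop() and its caller are hypothetical; only the filemap_* and invalidate_mapping_pages() prototypes are taken from this excerpt)::

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: write back the whole file, wait for completion,
     * then drop the now-clean pages from the page cache. */
    static int example_flush_and_drop(struct inode *inode)
    {
            struct address_space *mapping = inode->i_mapping;
            int err;

            /* Start writeback over the full byte range and wait for it. */
            err = filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
            if (err)
                    return err;

            /* Drop clean, unlocked pages; returns the number released. */
            invalidate_mapping_pages(mapping, 0, ULONG_MAX);
            return 0;
    }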
io-mapping.h
58 io_mapping_fini(struct io_mapping *mapping) in io_mapping_fini() argument
60 iomap_free(mapping->base, mapping->size); in io_mapping_fini()
65 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument
70 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc()
71 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc()
77 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_atomic_wc()
92 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_local_wc() argument
96 BUG_ON(offset >= mapping->size); in io_mapping_map_local_wc()
97 phys_addr = mapping->base + offset; in io_mapping_map_local_wc()
98 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); in io_mapping_map_local_wc()
[all …]
tpm_eventlog.h
166 void *mapping = NULL; in __calc_tpm2_event_size() local
186 mapping = TPM_MEMREMAP((unsigned long)marker_start, in __calc_tpm2_event_size()
188 if (!mapping) { in __calc_tpm2_event_size()
193 mapping = marker_start; in __calc_tpm2_event_size()
196 event = (struct tcg_pcr_event2_head *)mapping; in __calc_tpm2_event_size()
233 TPM_MEMUNMAP(mapping, mapping_size); in __calc_tpm2_event_size()
235 mapping = TPM_MEMREMAP((unsigned long)marker, in __calc_tpm2_event_size()
237 if (!mapping) { in __calc_tpm2_event_size()
242 mapping = marker; in __calc_tpm2_event_size()
245 memcpy(&halg, mapping, halg_size); in __calc_tpm2_event_size()
[all …]
/linux/mm/
truncate.c
26 static void clear_shadow_entries(struct address_space *mapping, in clear_shadow_entries() argument
29 XA_STATE(xas, &mapping->i_pages, start); in clear_shadow_entries()
33 if (shmem_mapping(mapping) || dax_mapping(mapping)) in clear_shadow_entries()
38 spin_lock(&mapping->host->i_lock); in clear_shadow_entries()
48 if (mapping_shrinkable(mapping)) in clear_shadow_entries()
49 inode_add_lru(mapping->host); in clear_shadow_entries()
50 spin_unlock(&mapping->host->i_lock); in clear_shadow_entries()
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping, in truncate_folio_batch_exceptionals() argument
63 XA_STATE(xas, &mapping->i_pages, indices[0]); in truncate_folio_batch_exceptionals()
69 if (shmem_mapping(mapping)) in truncate_folio_batch_exceptionals()
[all …]
filemap.c
128 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
131 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
134 mapping_set_update(&xas, mapping); in page_cache_delete()
144 folio->mapping = NULL; in page_cache_delete()
146 mapping->nrpages -= nr; in page_cache_delete()
149 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
162 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
191 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
209 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
210 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
[all …]
readahead.c
139 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
148 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
210 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
212 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
214 unsigned int min_nrpages = mapping_min_folio_nrpages(mapping); in page_cache_ra_unbounded()
228 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
229 index = mapping_align_index(mapping, index); in page_cache_ra_unbounded()
252 struct folio *folio = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
271 mapping_min_folio_order(mapping)); in page_cache_ra_unbounded()
[all …]
/linux/drivers/gpu/drm/tegra/
uapi.c
17 struct tegra_drm_mapping *mapping = in tegra_drm_mapping_release() local
20 host1x_bo_unpin(mapping->map); in tegra_drm_mapping_release()
21 host1x_bo_put(mapping->bo); in tegra_drm_mapping_release()
23 kfree(mapping); in tegra_drm_mapping_release()
26 void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping) in tegra_drm_mapping_put() argument
28 kref_put(&mapping->ref, tegra_drm_mapping_release); in tegra_drm_mapping_put()
33 struct tegra_drm_mapping *mapping; in tegra_drm_channel_context_close() local
39 xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
40 tegra_drm_mapping_put(mapping); in tegra_drm_channel_context_close()
189 struct tegra_drm_mapping *mapping; in tegra_drm_ioctl_channel_map() local
[all …]
/linux/drivers/media/usb/uvc/
uvc_ctrl.c
388 static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping) in uvc_ctrl_mapping_is_compound() argument
390 return mapping->v4l2_type >= V4L2_CTRL_COMPOUND_TYPES; in uvc_ctrl_mapping_is_compound()
393 static s32 uvc_mapping_get_s32(struct uvc_control_mapping *mapping, in uvc_mapping_get_s32() argument
398 mapping->get(mapping, query, data_in, sizeof(data_out), &data_out); in uvc_mapping_get_s32()
403 static void uvc_mapping_set_s32(struct uvc_control_mapping *mapping, in uvc_mapping_set_s32() argument
406 mapping->set(mapping, sizeof(data_in), &data_in, data_out); in uvc_mapping_set_s32()
423 static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping, in uvc_mapping_get_menu_value() argument
426 if (!test_bit(idx, &mapping->menu_mask)) in uvc_mapping_get_menu_value()
429 if (mapping->menu_mapping) in uvc_mapping_get_menu_value()
430 return mapping->menu_mapping[idx]; in uvc_mapping_get_menu_value()
[all …]
/linux/include/trace/events/
filemap.h
32 __entry->i_ino = folio->mapping->host->i_ino;
34 if (folio->mapping->host->i_sb)
35 __entry->s_dev = folio->mapping->host->i_sb->s_dev;
37 __entry->s_dev = folio->mapping->host->i_rdev;
62 struct address_space *mapping,
67 TP_ARGS(mapping, index, last_index),
77 __entry->i_ino = mapping->host->i_ino;
78 if (mapping->host->i_sb)
80 mapping->host->i_sb->s_dev;
82 __entry->s_dev = mapping->host->i_rdev;
[all …]
/linux/tools/testing/selftests/arm64/mte/
check_mmap_options.c
47 int mapping; member
113 static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, in check_anonymous_memory_mapping() argument
126 map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false); in check_anonymous_memory_mapping()
148 static int check_file_memory_mapping(int mem_type, int mode, int mapping, in check_file_memory_mapping() argument
166 map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd); in check_file_memory_mapping()
191 static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check) in check_clear_prot_mte_flag() argument
201 ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
222 ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping, in check_clear_prot_mte_flag()
300 switch (tc->mapping) { in format_test_name()
370 .mapping = MAP_PRIVATE, in main()
[all …]
/linux/drivers/gpu/drm/panfrost/
panfrost_gem.c
94 struct panfrost_gem_mapping *iter, *mapping = NULL; in panfrost_gem_mapping_get() local
100 mapping = iter; in panfrost_gem_mapping_get()
106 return mapping; in panfrost_gem_mapping_get()
110 panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) in panfrost_gem_teardown_mapping() argument
112 if (mapping->active) in panfrost_gem_teardown_mapping()
113 panfrost_mmu_unmap(mapping); in panfrost_gem_teardown_mapping()
115 spin_lock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
116 if (drm_mm_node_allocated(&mapping->mmnode)) in panfrost_gem_teardown_mapping()
117 drm_mm_remove_node(&mapping->mmnode); in panfrost_gem_teardown_mapping()
118 spin_unlock(&mapping->mmu->mm_lock); in panfrost_gem_teardown_mapping()
[all …]
/linux/arch/arm/mm/
dma-mapping.c
754 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
756 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
762 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
773 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
774 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
775 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
776 mapping->bits, 0, count, align); in __alloc_iova()
778 if (start > mapping->bits) in __alloc_iova()
781 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
790 if (i == mapping->nr_bitmaps) { in __alloc_iova()
[all …]
flush.c
199 void __flush_dcache_folio(struct address_space *mapping, struct folio *folio) in __flush_dcache_folio() argument
234 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_folio()
238 static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio) in __flush_dcache_aliases() argument
253 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
254 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) { in __flush_dcache_aliases()
281 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
289 struct address_space *mapping; in __sync_icache_dcache() local
303 mapping = folio_flush_mapping(folio); in __sync_icache_dcache()
305 mapping = NULL; in __sync_icache_dcache()
308 __flush_dcache_folio(mapping, folio); in __sync_icache_dcache()
[all …]
/linux/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
113 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
115 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
119 etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
121 drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
124 void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_reap_mapping() argument
126 struct etnaviv_iommu_context *context = mapping->context; in etnaviv_iommu_reap_mapping()
129 WARN_ON(mapping->use); in etnaviv_iommu_reap_mapping()
131 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_reap_mapping()
132 etnaviv_iommu_context_put(mapping->context); in etnaviv_iommu_reap_mapping()
133 mapping->context = NULL; in etnaviv_iommu_reap_mapping()
[all …]
etnaviv_gem.c
218 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_get_vram_mapping() local
220 list_for_each_entry(mapping, &obj->vram_list, obj_node) { in etnaviv_gem_get_vram_mapping()
221 if (mapping->context == context) in etnaviv_gem_get_vram_mapping()
222 return mapping; in etnaviv_gem_get_vram_mapping()
228 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) in etnaviv_gem_mapping_unreference() argument
230 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_gem_mapping_unreference()
233 WARN_ON(mapping->use == 0); in etnaviv_gem_mapping_unreference()
234 mapping->use -= 1; in etnaviv_gem_mapping_unreference()
245 struct etnaviv_vram_mapping *mapping; in etnaviv_gem_mapping_get() local
250 mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context); in etnaviv_gem_mapping_get()
[all …]
/linux/arch/arm64/kvm/
pkvm.c
301 struct pkvm_mapping *mapping; in __pkvm_pgtable_stage2_unmap() local
307 for_each_mapping_in_range_safe(pgt, start, end, mapping) { in __pkvm_pgtable_stage2_unmap()
308 ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, in __pkvm_pgtable_stage2_unmap()
309 mapping->nr_pages); in __pkvm_pgtable_stage2_unmap()
312 pkvm_mapping_remove(mapping, &pgt->pkvm_mappings); in __pkvm_pgtable_stage2_unmap()
313 kfree(mapping); in __pkvm_pgtable_stage2_unmap()
329 struct pkvm_mapping *mapping = NULL; in pkvm_pgtable_stage2_map() local
345 mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1); in pkvm_pgtable_stage2_map()
346 if (mapping) { in pkvm_pgtable_stage2_map()
347 if (size == (mapping->nr_pages * PAGE_SIZE)) in pkvm_pgtable_stage2_map()
[all …]
/linux/fs/gfs2/
aops.c
74 struct inode * const inode = folio->mapping->host; in gfs2_write_jdata_folio()
105 struct inode *inode = folio->mapping->host; in __gfs2_jdata_write_folio()
127 int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) in gfs2_jdata_writeback() argument
129 struct inode *inode = mapping->host; in gfs2_jdata_writeback()
131 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); in gfs2_jdata_writeback()
139 while ((folio = writeback_iter(mapping, wbc, folio, &error))) { in gfs2_jdata_writeback()
158 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument
161 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_writepages()
163 .inode = mapping->host, in gfs2_writepages()
191 static int gfs2_write_jdata_batch(struct address_space *mapping, in gfs2_write_jdata_batch() argument
[all …]
/linux/drivers/gpu/drm/exynos/
exynos_drm_dma.c
66 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); in drm_iommu_attach_device()
68 ret = iommu_attach_device(priv->mapping, subdrv_dev); in drm_iommu_attach_device()
92 iommu_detach_device(priv->mapping, subdrv_dev); in drm_iommu_detach_device()
109 if (!priv->mapping) { in exynos_drm_register_dma()
110 void *mapping = NULL; in exynos_drm_register_dma() local
113 mapping = arm_iommu_create_mapping(dev, in exynos_drm_register_dma()
116 mapping = iommu_get_domain_for_dev(priv->dma_dev); in exynos_drm_register_dma()
118 if (!mapping) in exynos_drm_register_dma()
120 priv->mapping = mapping; in exynos_drm_register_dma()
140 arm_iommu_release_mapping(priv->mapping); in exynos_drm_cleanup_dma()
[all …]
/linux/Documentation/arch/powerpc/
vmemmap_dedup.rst
14 With 2M PMD level mapping, we require 32 struct pages and a single 64K vmemmap
18 With 1G PUD level mapping, we require 16384 struct pages and a single 64K
20 require 16 64K pages in vmemmap to map the struct page for 1G PUD level mapping.
23 +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
35 | mapping | +-----------+ | |
46 With 4K page size, 2M PMD level mapping requires 512 struct pages and a single
48 require 8 4K pages in vmemmap to map the struct page for 2M pmd level mapping.
52 +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
64 | mapping | +-----------+ | |
74 With 1G PUD level mapping, we require 262144 struct pages and a single 4K
[all …]
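
The arithmetic behind the counts quoted above, as a standalone check (assumption: sizeof(struct page) == 64 bytes, which the document's numbers imply)::

    #include <stdio.h>

    int main(void)
    {
            const unsigned long sz_struct_page = 64;       /* assumed */
            const unsigned long base = 64UL * 1024;        /* 64K base pages */
            const unsigned long pmd = 2UL * 1024 * 1024;   /* 2M PMD mapping */

            unsigned long nr = pmd / base;                            /* 32 */
            unsigned long vmemmap = (nr * sz_struct_page + base - 1) / base;

            /* Prints "32 struct pages, 1 vmemmap page(s)", matching the text;
             * swapping base for 4K yields 512 struct pages and 8 pages. */
            printf("%lu struct pages, %lu vmemmap page(s)\n", nr, vmemmap);
            return 0;
    }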
/linux/Documentation/translations/zh_CN/mm/
page_migration.rst
143 2. ``int (*migratepage) (struct address_space *mapping,``
168 void __SetPageMovable(struct page *page, struct address_space *mapping)
171 PG_movable is not a real flag of struct page. Instead, the VM reuses the low
175 page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
177 so drivers should not access page->mapping directly. Instead, drivers should use page_mapping(), which can
178 mask off the low 2 bits of page->mapping under the page lock, yielding the correct struct address_space.
181 non-LRU movable pages, because the page->mapping field shares a union with other fields in struct page. If
182 the driver frees the page after the VM has isolated it, then even though page->mapping has PAGE_MAPPING_MOVABLE set,
185 page->mapping cannot have PAGE_MAPPING_MOVABLE set. When using lock_page() in a pfn scan
189 unlike in the past, PageMovable() validates page->mapping under lock_page()
[all …]
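
A sketch of the low-bit tagging described in this excerpt (assumption: simplified; the real PAGE_MAPPING_MOVABLE and PageMovable() live in include/linux/page-flags.h and perform more checks)::

    #define PAGE_MAPPING_MOVABLE 0x2

    /* The VM reuses the low bits of page->mapping as a tag rather than
     * spending a real page flag. */
    static inline void example_set_page_movable(struct page *page,
                                                struct address_space *mapping)
    {
            page->mapping = (struct address_space *)
                    ((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
    }

    /* Callers must mask the low two bits back off (under the page lock)
     * before dereferencing page->mapping. */
    static inline bool example_page_movable(const struct page *page)
    {
            return ((unsigned long)page->mapping & PAGE_MAPPING_MOVABLE) != 0;
    }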
/linux/drivers/net/wireless/marvell/mwifiex/
util.h
57 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument
61 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping()
65 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument
69 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping()
74 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local
76 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR()
78 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
/linux/drivers/sh/clk/
core.c
340 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local
345 if (!mapping) { in clk_establish_mapping()
352 clk->mapping = &dummy_mapping; in clk_establish_mapping()
361 mapping = clkp->mapping; in clk_establish_mapping()
362 BUG_ON(!mapping); in clk_establish_mapping()
368 if (!mapping->base && mapping->phys) { in clk_establish_mapping()
369 kref_init(&mapping->ref); in clk_establish_mapping()
371 mapping->base = ioremap(mapping->phys, mapping->len); in clk_establish_mapping()
372 if (unlikely(!mapping->base)) in clk_establish_mapping()
374 } else if (mapping->base) { in clk_establish_mapping()
[all …]
/linux/Documentation/driver-api/
io-mapping.rst
8 The io_mapping functions in linux/io-mapping.h provide an abstraction for
9 efficiently mapping small regions of an I/O device to the CPU. The initial
14 A mapping object is created during driver initialization using::
20 mappable, while 'size' indicates how large a mapping region to
23 This _wc variant provides a mapping which may only be used with
27 With this mapping object, individual pages can be mapped either temporarily
31 void *io_mapping_map_local_wc(struct io_mapping *mapping,
34 void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
37 'offset' is the offset within the defined mapping region. Accessing
46 Temporary mappings are only valid in the context of the caller. The mapping
[all …]
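
A minimal driver-side sketch of the API this document describes (assumption: bar_base and bar_size describe a write-combining-capable PCI BAR, and example_poke_page() is hypothetical)::

    #include <linux/io-mapping.h>

    static int example_poke_page(resource_size_t bar_base,
                                 unsigned long bar_size, unsigned long offset)
    {
            struct io_mapping *map;
            void __iomem *vaddr;

            /* One mapping object for the whole region, at init time. */
            map = io_mapping_create_wc(bar_base, bar_size);
            if (!map)
                    return -ENOMEM;

            /* Map one page temporarily; valid only in the calling context. */
            vaddr = io_mapping_map_local_wc(map, offset);
            /* ... write-combined accesses to the page go here ... */
            io_mapping_unmap_local(vaddr);

            io_mapping_free(map);
            return 0;
    }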
/linux/arch/nios2/include/asm/
cacheflush.h
54 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) argument
55 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) argument
56 #define flush_dcache_mmap_lock_irqsave(mapping, flags) \ argument
57 xa_lock_irqsave(&mapping->i_pages, flags)
58 #define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \ argument
59 xa_unlock_irqrestore(&mapping->i_pages, flags)
/linux/fs/
dax.c
353 return !folio->mapping && folio->share; in dax_folio_is_shared()
372 folio->mapping = NULL; in dax_folio_make_shared()
394 folio->mapping = NULL; in dax_folio_put()
408 new_folio->mapping = NULL; in dax_folio_put()
441 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
452 if (shared && (folio->mapping || dax_folio_is_shared(folio))) { in dax_associate_entry()
453 if (folio->mapping) in dax_associate_entry()
460 WARN_ON_ONCE(folio->mapping); in dax_associate_entry()
463 folio->mapping = mapping; in dax_associate_entry()
468 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
[all …]
