/linux-5.10/drivers/staging/gasket/ |
D | gasket_sysfs.c |
    17  * The device bound to this mapping. If this is NULL, then this mapping
    34  /* Tracks active users of this mapping. */
    50  /* Callback when a mapping's refcount goes to zero. */
    56  /* Look up mapping information for the given device. */
    71  dev_dbg(device, "%s: Mapping to device %s not found\n",  in get_mapping()
    76  /* Put a reference to a mapping. */
    77  static void put_mapping(struct gasket_sysfs_mapping *mapping)  in put_mapping() argument
    84  if (!mapping) {  in put_mapping()
    85  pr_debug("%s: Mapping should not be NULL\n", __func__);  in put_mapping()
    89  mutex_lock(&mapping->mutex);  in put_mapping()
    [all …]
|
/linux-5.10/mm/ |
D | truncate.c |
    34  static inline void __clear_shadow_entry(struct address_space *mapping,  in __clear_shadow_entry() argument
    37  XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
    43  mapping->nrexceptional--;  in __clear_shadow_entry()
    46  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,  in clear_shadow_entry() argument
    49  xa_lock_irq(&mapping->i_pages);  in clear_shadow_entry()
    50  __clear_shadow_entry(mapping, index, entry);  in clear_shadow_entry()
    51  xa_unlock_irq(&mapping->i_pages);  in clear_shadow_entry()
    59  static void truncate_exceptional_pvec_entries(struct address_space *mapping,  in truncate_exceptional_pvec_entries() argument
    67  if (shmem_mapping(mapping))  in truncate_exceptional_pvec_entries()
    77  dax = dax_mapping(mapping);  in truncate_exceptional_pvec_entries()
    [all …]
|
D | filemap.c |
    120  static void page_cache_delete(struct address_space *mapping,  in page_cache_delete() argument
    123  XA_STATE(xas, &mapping->i_pages, page->index);  in page_cache_delete()
    126  mapping_set_update(&xas, mapping);  in page_cache_delete()
    141  page->mapping = NULL;  in page_cache_delete()
    145  mapping->nrexceptional += nr;  in page_cache_delete()
    154  mapping->nrpages -= nr;  in page_cache_delete()
    157  static void unaccount_page_cache_page(struct address_space *mapping,  in unaccount_page_cache_page() argument
    170  cleancache_invalidate_page(mapping, page);  in unaccount_page_cache_page()
    184  if (mapping_exiting(mapping) &&  in unaccount_page_cache_page()
    210  filemap_nr_thps_dec(mapping);  in unaccount_page_cache_page()
    [all …]
|
D | readahead.c |
    34  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init() argument
    36  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;  in file_ra_state_init()
    48  static void read_cache_pages_invalidate_page(struct address_space *mapping,  in read_cache_pages_invalidate_page() argument
    54  page->mapping = mapping;  in read_cache_pages_invalidate_page()
    56  page->mapping = NULL;  in read_cache_pages_invalidate_page()
    65  static void read_cache_pages_invalidate_pages(struct address_space *mapping,  in read_cache_pages_invalidate_pages() argument
    73  read_cache_pages_invalidate_page(mapping, victim);  in read_cache_pages_invalidate_pages()
    79  * @mapping: the address_space
    89  int read_cache_pages(struct address_space *mapping, struct list_head *pages,  in read_cache_pages() argument
    98  if (add_to_page_cache_lru(page, mapping, page->index,  in read_cache_pages()
    [all …]
|
/linux-5.10/drivers/gpu/drm/panfrost/ |
D | panfrost_gem.c |
    7  #include <linux/dma-mapping.h>
    59  struct panfrost_gem_mapping *iter, *mapping = NULL;  in panfrost_gem_mapping_get() local
    65  mapping = iter;  in panfrost_gem_mapping_get()
    71  return mapping;  in panfrost_gem_mapping_get()
    75  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)  in panfrost_gem_teardown_mapping() argument
    79  if (mapping->active)  in panfrost_gem_teardown_mapping()
    80  panfrost_mmu_unmap(mapping);  in panfrost_gem_teardown_mapping()
    82  priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);  in panfrost_gem_teardown_mapping()
    84  if (drm_mm_node_allocated(&mapping->mmnode))  in panfrost_gem_teardown_mapping()
    85  drm_mm_remove_node(&mapping->mmnode);  in panfrost_gem_teardown_mapping()
    [all …]
|
/linux-5.10/include/linux/ |
D | pagemap.h |
    22  * Bits in mapping->flags.
    37  * @mapping: the mapping in which an error should be set
    38  * @error: the error to set in the mapping
    46  * mapping_set_error to record the error in the mapping so that it can be
    49  static inline void mapping_set_error(struct address_space *mapping, int error)  in mapping_set_error() argument
    55  __filemap_set_wb_err(mapping, error);  in mapping_set_error()
    58  if (mapping->host)  in mapping_set_error()
    59  errseq_set(&mapping->host->i_sb->s_wb_err, error);  in mapping_set_error()
    63  set_bit(AS_ENOSPC, &mapping->flags);  in mapping_set_error()
    65  set_bit(AS_EIO, &mapping->flags);  in mapping_set_error()
    [all …]
|
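The pagemap.h hits above are mostly from mapping_set_error(), which records a writeback error both in the mapping's errseq_t and in the legacy AS_EIO/AS_ENOSPC flag bits so that a later fsync() can report it. A minimal kernel-style sketch of how an end-of-writeback path might call it; the surrounding function is hypothetical, and only mapping_set_error() and the flag semantics come from the header quoted above:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hypothetical end-of-writeback handler for a single page. */
    static void example_end_writeback(struct page *page, int error)
    {
            struct address_space *mapping = page_mapping(page);

            if (error) {
                    SetPageError(page);
                    if (mapping)
                            /* Records the error in mapping->wb_err and sets
                             * AS_EIO or AS_ENOSPC, as in the snippet above. */
                            mapping_set_error(mapping, error);
            }
            end_page_writeback(page);
    }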
D | io-mapping.h |
    17  * The io_mapping mechanism provides an abstraction for mapping
    20  * See Documentation/driver-api/io-mapping.rst
    35  * For small address space machines, mapping large objects
    58  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
    60  iomap_free(mapping->base, mapping->size);  in io_mapping_fini()
    65  io_mapping_map_atomic_wc(struct io_mapping *mapping,  in io_mapping_map_atomic_wc() argument
    70  BUG_ON(offset >= mapping->size);  in io_mapping_map_atomic_wc()
    71  phys_addr = mapping->base + offset;  in io_mapping_map_atomic_wc()
    72  return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);  in io_mapping_map_atomic_wc()
    82  io_mapping_map_wc(struct io_mapping *mapping,  in io_mapping_map_wc() argument
    [all …]
|
D | tpm_eventlog.h |
    166  void *mapping = NULL;  in __calc_tpm2_event_size() local
    186  mapping = TPM_MEMREMAP((unsigned long)marker_start,  in __calc_tpm2_event_size()
    188  if (!mapping) {  in __calc_tpm2_event_size()
    193  mapping = marker_start;  in __calc_tpm2_event_size()
    196  event = (struct tcg_pcr_event2_head *)mapping;  in __calc_tpm2_event_size()
    233  TPM_MEMUNMAP(mapping, mapping_size);  in __calc_tpm2_event_size()
    235  mapping = TPM_MEMREMAP((unsigned long)marker,  in __calc_tpm2_event_size()
    237  if (!mapping) {  in __calc_tpm2_event_size()
    242  mapping = marker;  in __calc_tpm2_event_size()
    245  memcpy(&halg, mapping, halg_size);  in __calc_tpm2_event_size()
    [all …]
|
/linux-5.10/Documentation/admin-guide/mm/ |
D | nommu-mmap.rst |
    2  No-MMU memory mapping support
    5  The kernel has limited support for memory mapping under no-MMU conditions, such
    7  mapping is made use of in conjunction with the mmap() system call, the shmat()
    9  mapping is actually performed by the binfmt drivers, which call back into the
    12  Memory mapping behaviour also involves the way fork(), vfork(), clone() and
    19  (#) Anonymous mapping, MAP_PRIVATE
    27  (#) Anonymous mapping, MAP_SHARED
    37  the underlying file are reflected in the mapping; copied across fork.
    41  - If one exists, the kernel will re-use an existing mapping to the
    45  - If possible, the file mapping will be directly on the backing device
    [all …]
|
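The nommu-mmap.rst excerpt above contrasts anonymous MAP_PRIVATE and MAP_SHARED mappings under no-MMU. A small runnable userspace illustration of the two cases being described; this is generic POSIX code rather than anything from the document, and on a no-MMU kernel the private region is simply a contiguous run of pages with no copy-on-write behind it:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;

            /* Anonymous, MAP_PRIVATE: plain memory, contiguous on no-MMU. */
            char *priv = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* Anonymous, MAP_SHARED: shareable between related processes. */
            char *shrd = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0);

            if (priv == MAP_FAILED || shrd == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            strcpy(priv, "private");
            strcpy(shrd, "shared");
            printf("%s / %s\n", priv, shrd);
            munmap(priv, len);
            munmap(shrd, len);
            return 0;
    }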
/linux-5.10/drivers/gpu/drm/exynos/ |
D | exynos_drm_dma.c |
    35  * drm_iommu_attach_device- attach device to iommu mapping
    41  * mapping.
    58  * Keep the original DMA mapping of the sub-device and  in drm_iommu_attach_device()
    67  ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);  in drm_iommu_attach_device()
    69  ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
    76  * drm_iommu_detach_device -detach device address space mapping from device
    82  * mapping
    93  iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
    103  DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",  in exynos_drm_register_dma()
    110  if (!priv->mapping) {  in exynos_drm_register_dma()
    [all …]
|
/linux-5.10/drivers/media/usb/uvc/ |
D | uvc_ctrl.c |
    365  static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,  in uvc_ctrl_get_zoom() argument
    383  static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,  in uvc_ctrl_set_zoom() argument
    390  static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,  in uvc_ctrl_get_rel_speed() argument
    393  unsigned int first = mapping->offset / 8;  in uvc_ctrl_get_rel_speed()
    410  static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,  in uvc_ctrl_set_rel_speed() argument
    413  unsigned int first = mapping->offset / 8;  in uvc_ctrl_set_rel_speed()
    759  /* Extract the bit string specified by mapping->offset and mapping->size
    761  * a signed 32bit integer. Sign extension will be performed if the mapping
    764  static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,  in uvc_get_le_value() argument
    767  int bits = mapping->size;  in uvc_get_le_value()
    [all …]
|
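The comment quoted at lines 759-764 describes uvc_get_le_value(): it extracts the bit string selected by mapping->offset and mapping->size from a little-endian byte array and sign-extends the result into a signed 32-bit integer. A standalone sketch of that idea, using a hypothetical helper rather than the driver's code:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract a 'bits'-wide little-endian bit field starting at bit 'offset'
     * of 'data' and sign-extend it, mirroring what the UVC comment describes. */
    static int32_t get_le_bits(const uint8_t *data, unsigned int offset,
                               unsigned int bits)
    {
            uint32_t value = 0;
            unsigned int i;

            for (i = 0; i < bits; i++) {
                    unsigned int bit = offset + i;

                    if (data[bit / 8] & (1u << (bit % 8)))
                            value |= 1u << i;
            }
            if (bits < 32 && (value & (1u << (bits - 1))))
                    value |= ~0u << bits;   /* sign extension */
            return (int32_t)value;
    }

    int main(void)
    {
            const uint8_t report[] = { 0x00, 0xf8, 0xff };

            /* 13-bit field starting at bit 11: all ones, so this prints -1. */
            printf("%d\n", get_le_bits(report, 11, 13));
            return 0;
    }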
/linux-5.10/tools/testing/selftests/arm64/mte/ |
D | check_mmap_options.c |
    60  static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)  in check_anonymous_memory_mapping() argument
    70  map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);  in check_anonymous_memory_mapping()
    92  static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)  in check_file_memory_mapping() argument
    106  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_file_memory_mapping()
    131  static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)  in check_clear_prot_mte_flag() argument
    141  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
    162  ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
    214  "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");  in main()
    216  …"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n…  in main()
    220  "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");  in main()
    [all …]
|
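check_mmap_options.c exercises anonymous and file-backed mappings under the various MTE tag-check modes; its mte_allocate_memory() helpers ultimately come down to mmap() plus PROT_MTE. A hedged, arm64-only sketch of that underlying sequence; the prctl flags and the PROT_MTE value follow the kernel's MTE documentation, but this is not the selftest's own helper and it only runs on MTE-capable hardware:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20           /* arm64-specific mmap/mprotect flag */
    #endif

    int main(void)
    {
            /* Enable tagged addressing with synchronous tag-check faults. */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                      (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0)) {
                    perror("prctl");        /* kernel or CPU without MTE */
                    return 1;
            }

            /* Anonymous private mapping, then enable tagging, as in the
             * "mmap/mprotect memory" variants of the selftest. */
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED ||
                mprotect(p, 4096, PROT_READ | PROT_WRITE | PROT_MTE)) {
                    perror("mmap/mprotect");
                    return 1;
            }

            p[0] = 1;       /* accesses are now tag-checked in sync mode */
            munmap(p, 4096);
            return 0;
    }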
D | check_child_memory.c |
    84  static int check_child_memory_mapping(int mem_type, int mode, int mapping)  in check_child_memory_mapping() argument
    93  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_child_memory_mapping()
    106  static int check_child_file_mapping(int mem_type, int mode, int mapping)  in check_child_file_mapping() argument
    119  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_child_file_mapping()
    170  "Check child anonymous memory with private mapping, precise mode and mmap memory\n");  in main()
    172  "Check child anonymous memory with shared mapping, precise mode and mmap memory\n");  in main()
    174  "Check child anonymous memory with private mapping, imprecise mode and mmap memory\n");  in main()
    176  "Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n");  in main()
    178  "Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n");  in main()
    180  "Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n");  in main()
    [all …]
|
/linux-5.10/arch/arm/mm/ |
D | dma-mapping.c |
    3  * linux/arch/arm/mm/dma-mapping.c
    7  * DMA uncached mapping support.
    420  * Clear previous low-memory mapping to ensure that the  in dma_contiguous_remap()
    783  * Create userspace mapping for the DMA-coherent memory.
    801  * Free a buffer as defined by the above mapping.
    910  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
    1099  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
    1101  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,  in __alloc_iova() argument
    1107  size_t mapping_size = mapping->bits << PAGE_SHIFT;  in __alloc_iova()
    1118  spin_lock_irqsave(&mapping->lock, flags);  in __alloc_iova()
    [all …]
|
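The dma-mapping.c matches above come from the ARM IOMMU integration, where __alloc_iova() hands out I/O virtual addresses from a per-mapping bitmap under mapping->lock. A much-simplified kernel-style sketch of that allocation scheme; the structure and function names here are illustrative, not the file's own:

    #include <linux/bitmap.h>
    #include <linux/dma-mapping.h>
    #include <linux/spinlock.h>

    struct simple_iova_domain {
            spinlock_t lock;
            unsigned long *bitmap;  /* one bit per PAGE_SIZE slot */
            unsigned int nbits;
            dma_addr_t base;
    };

    static dma_addr_t simple_alloc_iova(struct simple_iova_domain *dom,
                                        unsigned int nr_pages)
    {
            unsigned long flags, start;

            spin_lock_irqsave(&dom->lock, flags);
            start = bitmap_find_next_zero_area(dom->bitmap, dom->nbits, 0,
                                               nr_pages, 0);
            if (start >= dom->nbits) {
                    spin_unlock_irqrestore(&dom->lock, flags);
                    return DMA_MAPPING_ERROR;
            }
            bitmap_set(dom->bitmap, start, nr_pages);
            spin_unlock_irqrestore(&dom->lock, flags);

            return dom->base + ((dma_addr_t)start << PAGE_SHIFT);
    }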
D | flush.c |
    199  void __flush_dcache_page(struct address_space *mapping, struct page *page)  in __flush_dcache_page() argument
    202  * Writeback any data associated with the kernel mapping of this  in __flush_dcache_page()
    204  * coherent with the kernels mapping.  in __flush_dcache_page()
    232  if (mapping && cache_is_vipt_aliasing())  in __flush_dcache_page()
    237  static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)  in __flush_dcache_aliases() argument
    247  * - aliasing VIPT: we only need to find one mapping of this page.  in __flush_dcache_aliases()
    251  flush_dcache_mmap_lock(mapping);  in __flush_dcache_aliases()
    252  vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {  in __flush_dcache_aliases()
    265  flush_dcache_mmap_unlock(mapping);  in __flush_dcache_aliases()
    273  struct address_space *mapping;  in __sync_icache_dcache() local
    [all …]
|
/linux-5.10/Documentation/driver-api/ |
D | io-mapping.rst |
    8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
    9  efficiently mapping small regions of an I/O device to the CPU. The initial
    14  A mapping object is created during driver initialization using::
    20  mappable, while 'size' indicates how large a mapping region to
    23  This _wc variant provides a mapping which may only be used
    26  With this mapping object, individual pages can be mapped either atomically
    30  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
    33  'offset' is the offset within the defined mapping region.
    59  void *io_mapping_map_wc(struct io_mapping *mapping,
    75  void io_mapping_free(struct io_mapping *mapping)
    [all …]
|
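io-mapping.rst sketches the lifecycle: io_mapping_create_wc() at initialization, io_mapping_map_atomic_wc() or io_mapping_map_wc() for per-page access, and io_mapping_free() at teardown. A kernel-style sketch of that sequence for a hypothetical PCI BAR; the BAR index and the example_* names are made up, while the io_mapping_* calls are the documented ones:

    #include <linux/io-mapping.h>
    #include <linux/pci.h>

    static struct io_mapping *vram_map;     /* hypothetical driver state */

    static int example_init(struct pci_dev *pdev)
    {
            resource_size_t base = pci_resource_start(pdev, 2);
            unsigned long size = pci_resource_len(pdev, 2);

            vram_map = io_mapping_create_wc(base, size);    /* whole aperture */
            return vram_map ? 0 : -ENOMEM;
    }

    static void example_write_dword(unsigned long offset, u32 val)
    {
            void __iomem *vaddr;

            /* Map one page atomically, write, unmap; no sleeping in between. */
            vaddr = io_mapping_map_atomic_wc(vram_map, offset & PAGE_MASK);
            writel(val, vaddr + (offset & ~PAGE_MASK));
            io_mapping_unmap_atomic(vaddr);
    }

    static void example_fini(void)
    {
            io_mapping_free(vram_map);
    }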
/linux-5.10/drivers/gpu/drm/etnaviv/ |
D | etnaviv_mmu.c |
    6  #include <linux/dma-mapping.h>
    65  /* unroll mapping in case something went wrong */  in etnaviv_context_map()
    123  struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_remove_mapping() argument
    125  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_iommu_remove_mapping()
    127  etnaviv_iommu_unmap(context, mapping->vram_node.start,  in etnaviv_iommu_remove_mapping()
    129  drm_mm_remove_node(&mapping->vram_node);  in etnaviv_iommu_remove_mapping()
    164  * so we must keep its mapping.  in etnaviv_iommu_find_iova()
    196  * this mapping.  in etnaviv_iommu_find_iova()
    225  struct etnaviv_vram_mapping *mapping, u64 va)  in etnaviv_iommu_map_gem() argument
    242  mapping->iova = iova;  in etnaviv_iommu_map_gem()
    [all …]
|
D | etnaviv_gem.c |
    7  #include <linux/dma-mapping.h>
    229  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_get_vram_mapping() local
    231  list_for_each_entry(mapping, &obj->vram_list, obj_node) {  in etnaviv_gem_get_vram_mapping()
    232  if (mapping->context == context)  in etnaviv_gem_get_vram_mapping()
    233  return mapping;  in etnaviv_gem_get_vram_mapping()
    239  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)  in etnaviv_gem_mapping_unreference() argument
    241  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_gem_mapping_unreference()
    244  WARN_ON(mapping->use == 0);  in etnaviv_gem_mapping_unreference()
    245  mapping->use -= 1;  in etnaviv_gem_mapping_unreference()
    256  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_mapping_get() local
    [all …]
|
/linux-5.10/tools/testing/selftests/vm/ |
D | mremap_dontunmap.c |
    62  "unable to unmap destination mapping");  in kernel_support_for_mremap_dontunmap()
    66  "unable to unmap source mapping");  in kernel_support_for_mremap_dontunmap()
    70  // This helper will just validate that an entire mapping contains the expected
    97  // the source mapping mapped.
    109  // Try to just move the whole mapping anywhere (not fixed).  in mremap_dontunmap_simple()
    125  "unable to unmap destination mapping");  in mremap_dontunmap_simple()
    127  "unable to unmap source mapping");  in mremap_dontunmap_simple()
    138  // create a mapping up front.  in mremap_dontunmap_simple_fixed()
    157  "mremap should have placed the remapped mapping at dest_mapping");  in mremap_dontunmap_simple_fixed()
    159  // The dest mapping will have been unmap by mremap so we expect the Xs  in mremap_dontunmap_simple_fixed()
    [all …]
|
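mremap_dontunmap.c probes mremap() with MREMAP_DONTUNMAP: the pages move to the destination while the source mapping stays in place and subsequently faults in as zero-filled pages. A small runnable illustration of the behaviour the test checks; it needs Linux 5.7 or later, and MREMAP_DONTUNMAP is defined locally in case the libc headers predate it:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MREMAP_DONTUNMAP
    #define MREMAP_DONTUNMAP 4
    #endif

    int main(void)
    {
            size_t len = 4 * 4096;
            char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (src == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memset(src, 'X', len);

            /* Move the pages elsewhere but keep the old mapping in place. */
            char *dst = mremap(src, len, len,
                               MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
            if (dst == MAP_FAILED) {
                    perror("mremap");
                    return 1;
            }

            /* dst holds the Xs; src is still mapped but now reads as zeroes. */
            printf("dst[0]=%c src[0]=%d\n", dst[0], src[0]);
            munmap(dst, len);
            munmap(src, len);
            return 0;
    }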
/linux-5.10/fs/ |
D | dax.c |
    181  * @entry may no longer be the entry at the index in the mapping.
    331  static void dax_associate_entry(void *entry, struct address_space *mapping,  in dax_associate_entry() argument
    344  WARN_ON_ONCE(page->mapping);  in dax_associate_entry()
    345  page->mapping = mapping;  in dax_associate_entry()
    350  static void dax_disassociate_entry(void *entry, struct address_space *mapping,  in dax_disassociate_entry() argument
    362  WARN_ON_ONCE(page->mapping && page->mapping != mapping);  in dax_disassociate_entry()
    363  page->mapping = NULL;  in dax_disassociate_entry()
    394  /* Ensure page->mapping isn't freed while we look at it */  in dax_lock_page()
    397  struct address_space *mapping = READ_ONCE(page->mapping);  in dax_lock_page() local
    400  if (!mapping || !dax_mapping(mapping))  in dax_lock_page()
    [all …]
|
/linux-5.10/drivers/sh/clk/ |
D | core.c |
    340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
    345  if (!mapping) {  in clk_establish_mapping()
    349  * dummy mapping for root clocks with no specified ranges  in clk_establish_mapping()
    352  clk->mapping = &dummy_mapping;  in clk_establish_mapping()
    357  * If we're on a child clock and it provides no mapping of its  in clk_establish_mapping()
    358  * own, inherit the mapping from its root clock.  in clk_establish_mapping()
    361  mapping = clkp->mapping;  in clk_establish_mapping()
    362  BUG_ON(!mapping);  in clk_establish_mapping()
    366  * Establish initial mapping.  in clk_establish_mapping()
    368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
    [all …]
|
/linux-5.10/fs/gfs2/ |
D | aops.c |
    91  struct inode *inode = page->mapping->host;  in gfs2_writepage()
    120  struct inode * const inode = page->mapping->host;  in gfs2_write_jdata_page()
    153  struct inode *inode = page->mapping->host;  in __gfs2_jdata_writepage()
    179  struct inode *inode = page->mapping->host;  in gfs2_jdata_writepage()
    198  * @mapping: The mapping to write
    203  static int gfs2_writepages(struct address_space *mapping,  in gfs2_writepages() argument
    206  struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);  in gfs2_writepages()
    216  ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);  in gfs2_writepages()
    224  * @mapping: The mapping
    233  static int gfs2_write_jdata_pagevec(struct address_space *mapping,  in gfs2_write_jdata_pagevec() argument
    [all …]
|
/linux-5.10/arch/nios2/mm/ |
D | cacheflush.c |
    73  static void flush_aliases(struct address_space *mapping, struct page *page)  in flush_aliases() argument
    81  flush_dcache_mmap_lock(mapping);  in flush_aliases()
    82  vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {  in flush_aliases()
    94  flush_dcache_mmap_unlock(mapping);  in flush_aliases()
    160  void __flush_dcache_page(struct address_space *mapping, struct page *page)  in __flush_dcache_page() argument
    163  * Writeback any data associated with the kernel mapping of this  in __flush_dcache_page()
    165  * coherent with the kernels mapping.  in __flush_dcache_page()
    174  struct address_space *mapping;  in flush_dcache_page() local
    183  mapping = page_mapping_file(page);  in flush_dcache_page()
    186  if (mapping && !mapping_mapped(mapping)) {  in flush_dcache_page()
    [all …]
|
/linux-5.10/fs/afs/ |
D | write.c |
    77  int afs_write_begin(struct file *file, struct address_space *mapping,  in afs_write_begin() argument
    93  page = grab_cache_page_write_begin(mapping, index, flags);  in afs_write_begin()
    162  int afs_write_end(struct file *file, struct address_space *mapping,  in afs_write_end() argument
    238  static void afs_kill_pages(struct address_space *mapping,  in afs_kill_pages() argument
    241  struct afs_vnode *vnode = AFS_FS_I(mapping->host);  in afs_kill_pages()
    256  pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);  in afs_kill_pages()
    267  generic_error_remove_page(mapping, page);  in afs_kill_pages()
    281  struct address_space *mapping,  in afs_redirty_pages() argument
    284  struct afs_vnode *vnode = AFS_FS_I(mapping->host);  in afs_redirty_pages()
    299  pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);  in afs_redirty_pages()
    [all …]
|
/linux-5.10/Documentation/core-api/ |
D | dma-attributes.rst |
    6  defined in linux/dma-mapping.h.
    11  DMA_ATTR_WEAK_ORDERING specifies that reads and writes to the mapping
    21  DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
    32  virtual mapping for the allocated buffer. On some architectures creating
    33  such mapping is non-trivial task and consumes very limited resources
    52  having a mapping created separately for each device and is usually
    67  device domain after releasing a mapping for it. Use this attribute with
    73  By default DMA-mapping subsystem is allowed to assemble the buffer
    82  This is a hint to the DMA-mapping subsystem that it's probably not worth
    84  efficiency (AKA it's not worth trying to build the mapping out of larger
    [all …]
|
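dma-attributes.rst describes flags such as DMA_ATTR_WEAK_ORDERING, DMA_ATTR_WRITE_COMBINE and DMA_ATTR_NO_KERNEL_MAPPING that drivers pass to dma_alloc_attrs()/dma_free_attrs(). A minimal kernel-style sketch of passing one of them; the device, size and helper names are placeholders rather than anything from the document:

    #include <linux/dma-mapping.h>

    static void *alloc_wc_buffer(struct device *dev, size_t size,
                                 dma_addr_t *dma)
    {
            /* DMA_ATTR_WRITE_COMBINE: CPU writes may be buffered and combined,
             * which is usually fine for buffers the CPU only fills and the
             * device only reads (e.g. a framebuffer-style buffer). */
            return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                                   DMA_ATTR_WRITE_COMBINE);
    }

    static void free_wc_buffer(struct device *dev, size_t size, void *cpu,
                               dma_addr_t dma)
    {
            /* Free with the same attributes that were used at allocation. */
            dma_free_attrs(dev, size, cpu, dma, DMA_ATTR_WRITE_COMBINE);
    }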