/linux-6.15/arch/arm/mm/ |
D | dma-mapping.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    3:  * linux/arch/arm/mm/dma-mapping.c
    5:  * Copyright (C) 2000-2004 Russell King
    17: #include <linux/dma-direct.h>
    18: #include <linux/dma-map-ops.h>
    28: #include <asm/page.h>
    33: #include <asm/dma-iommu.h>
    36: #include <asm/xen/xen-ops.h>
    43: size_t size;
    53: size_t size;
    [all …]
|
/linux-6.15/kernel/dma/ |
D | direct.c |
    1: // SPDX-License-Identifier: GPL-2.0
    3:  * Copyright (C) 2018-2020 Christoph Hellwig.
    10: #include <linux/dma-map-ops.h>
    21:  * override the variable below for dma-direct to work properly.
    33: static inline struct page *dma_direct_to_page(struct device *dev,
    41: phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
    44: return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
    50: dev->coherent_dma_mask,
    51: dev->bus_dma_limit);
    69: bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
    [all …]
|
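The direct.c excerpt above computes dma_direct_get_required_mask() as (1ULL << (fls64(max_dma) - 1)) * 2 - 1, i.e. the smallest all-ones mask that covers the highest DMA address. A small stand-alone illustration of that rounding follows; fls64() is approximated with a compiler builtin and the names are hypothetical, so treat this as a sketch rather than kernel code.

    /* User-space illustration of the rounding in dma_direct_get_required_mask():
     * the result is the smallest value of the form 2^n - 1 that covers max_dma.
     */
    #include <stdint.h>
    #include <stdio.h>

    static int my_fls64(uint64_t x)
    {
    	return x ? 64 - __builtin_clzll(x) : 0;	/* position of highest set bit */
    }

    int main(void)
    {
    	uint64_t max_dma = 0x23ffff000ULL;	/* e.g. the top page on a ~9GB machine */
    	uint64_t mask = (1ULL << (my_fls64(max_dma) - 1)) * 2 - 1;

    	printf("required mask %#llx\n", (unsigned long long)mask);	/* 0x3ffffffff */
    	return 0;
    }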
D | ops_helpers.c |
    1: // SPDX-License-Identifier: GPL-2.0
    6: #include <linux/dma-map-ops.h>
    7: #include <linux/iommu-dma.h>
    9: static struct page *dma_common_vaddr_to_page(void *cpu_addr)
    17:  * Create scatter-list for the already allocated DMA buffer.
    20: void *cpu_addr, dma_addr_t dma_addr, size_t size,
    23: struct page *page = dma_common_vaddr_to_page(cpu_addr);
    28: sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
    33:  * Create userspace mapping for the DMA-coherent memory.
    36: void *cpu_addr, dma_addr_t dma_addr, size_t size,
    [all …]
|
D | pool.c |
    1: // SPDX-License-Identifier: GPL-2.0
    8: #include <linux/dma-map-ops.h>
    9: #include <linux/dma-direct.h>
    23: /* Size can be defined by the coherent_pool command line */
    46: static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
    49: pool_size_dma += size;
    51: pool_size_dma32 += size;
    53: pool_size_kernel += size;
    58: unsigned long size;
    66: size = cma_get_size(cma);
    [all …]
|
/linux-6.15/include/net/page_pool/ |
D | helpers.h |
    1: /* SPDX-License-Identifier: GPL-2.0
    11:  * The page_pool allocator is optimized for recycling page or page fragment used
    15:  * which allocate memory with or without page splitting depending on the
    16:  * requested memory size.
    19:  * always smaller than half a page, it can use one of the more specific API
    22:  * 1. page_pool_alloc_pages(): allocate memory without page splitting when
    23:  * driver knows that the memory it need is always bigger than half of the page
    24:  * allocated from page pool. There is no cache line dirtying for 'struct page'
    25:  * when a page is recycled back to the page pool.
    27:  * 2. page_pool_alloc_frag(): allocate memory with page splitting when driver
    [all …]
|
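The helpers.h doc comment above distinguishes whole-page allocation from fragment (split-page) allocation. Below is a minimal driver-side sketch of the two entry points; the page_pool_params fields and signatures reflect recent kernels, and names such as my_rx_pool_create() are hypothetical, so treat this as illustrative rather than authoritative.

    /* Illustrative only: the two allocation styles the comment describes.
     * Kernel-side code; builds only in a kernel tree.
     */
    #include <linux/mm.h>
    #include <linux/numa.h>
    #include <net/page_pool/helpers.h>
    #include <net/page_pool/types.h>

    static struct page_pool *my_rx_pool_create(struct device *dev)
    {
    	struct page_pool_params pp = {
    		.order		= 0,		/* one base page per pool entry */
    		.pool_size	= 256,		/* driver-specific ring size */
    		.nid		= NUMA_NO_NODE,
    		.dev		= dev,
    		.dma_dir	= DMA_FROM_DEVICE,
    	};

    	return page_pool_create(&pp);	/* ERR_PTR() on failure */
    }

    /* Case 1: buffer always larger than half a page -> take a whole page. */
    static struct page *my_rx_alloc_full(struct page_pool *pool)
    {
    	return page_pool_alloc_pages(pool, GFP_ATOMIC);
    }

    /* Case 2: buffer known to be small -> let the pool split the page. */
    static void *my_rx_alloc_small(struct page_pool *pool, unsigned int len)
    {
    	unsigned int offset;
    	struct page *page;

    	page = page_pool_alloc_frag(pool, &offset, len, GFP_ATOMIC);
    	if (!page)
    		return NULL;
    	return page_address(page) + offset;
    }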
/linux-6.15/sound/pci/emu10k1/ |
D | memory.c |
    1: // SPDX-License-Identifier: GPL-2.0-or-later
    6:  * EMU10K1 memory page allocation (PTB area)
    18: /* page arguments of these two macros are Emu page (4096 bytes), not like
    21: #define __set_ptb_entry(emu,page,addr) \
    22: (((__le32 *)(emu)->ptb_pages.area)[page] = \
    23: cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
    24: #define __get_ptb_entry(emu, page) \
    25: (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
    30: /* get aligned page from offset address */
    32: /* get offset address from aligned page */
    [all …]
|
/linux-6.15/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | vmm.c |
    32: kvfree(pgt->pde);
    41: const struct nvkm_vmm_page *page)
    43: const u32 pten = 1 << desc->bits;
    47: if (desc->type > PGT) {
    48: if (desc->type == SPT) {
    49: const struct nvkm_vmm_desc *pair = page[-1].desc;
    50: lpte = pten >> (desc->bits - pair->bits);
    58: pgt->page = page ? page->shift : 0;
    59: pgt->sparse = sparse;
    61: if (desc->type == PGD) {
    [all …]
|
D | uvmm.c |
    42: return nvkm_vmm_ref(nvkm_uvmm(object)->vmm);
    51: struct nvkm_vmm *vmm = uvmm->vmm;
    52: int ret = -ENOSYS;
    53: u64 addr, size;
    55: if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
    56: addr = args->v0.addr;
    57: size = args->v0.size;
    61: if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
    62: return -EINVAL;
    64: if (size) {
    [all …]
|
/linux-6.15/arch/arm64/include/asm/ |
D | kvm_pgtable.h |
    1: // SPDX-License-Identifier: GPL-2.0-only
    14: #define KVM_PGTABLE_FIRST_LEVEL -1
    18:  * The largest supported block sizes for KVM (no 52-bit PA support):
    19:  *  - 4K (level 1): 1GB
    20:  *  - 16K (level 2): 32MB
    21:  *  - 64K (level 2): 512MB
    60: #define KVM_PHYS_INVALID (-1ULL)
    99:  * Used to indicate a pte for which a 'break-before-make' sequence is in
    176: static inline bool kvm_is_block_size_supported(u64 size)
    178: bool is_power_of_two = IS_ALIGNED(size, size);
    [all …]
|
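Line 178 of the excerpt above uses IS_ALIGNED(size, size) as a power-of-two test: IS_ALIGNED(x, a) expands to ((x) & ((a) - 1)) == 0, and aligning a value to itself only succeeds when exactly one bit is set. A quick stand-alone check of that identity (the macro is re-defined locally here; this is not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

    int main(void)
    {
    	uint64_t sizes[] = { 1ULL << 21, 1ULL << 30, (1ULL << 21) + 4096 };

    	for (int i = 0; i < 3; i++)
    		printf("%#llx power of two? %d\n",
    		       (unsigned long long)sizes[i],
    		       IS_ALIGNED(sizes[i], sizes[i]));	/* 1, 1, 0 */
    	return 0;
    }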
/linux-6.15/Documentation/admin-guide/mm/ |
D | hugetlbpage.rst |
    9: the Linux kernel. This support is built on top of multiple page size support
    11: support 4K and 2M (1G if architecturally supported) page sizes, ia64
    12: architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
    13: 256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
    19: Users can use the huge page support in Linux kernel by either using the mmap
    28: persistent hugetlb pages in the kernel's huge page pool. It also displays
    29: default huge page size and information about the number of free, reserved
    30: and surplus huge pages in the pool of huge pages of default size.
    31: The huge page size is needed for generating the proper alignment and
    32: size of the arguments to system calls that map huge page regions.
    [all …]
|
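The admin guide excerpted above explains that applications reach the huge page pool through mmap() or SysV shared memory. A minimal user-space sketch of the mmap() path follows; it assumes the default huge page size is 2M and that free huge pages have already been reserved (e.g. via /proc/sys/vm/nr_hugepages), so it is illustrative only.

    /* Map and touch one default-size huge page anonymously. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    #define LENGTH (2UL * 1024 * 1024)	/* assumed 2M default huge page size */

    int main(void)
    {
    	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
    			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    	if (addr == MAP_FAILED) {
    		perror("mmap(MAP_HUGETLB)");	/* fails if no huge pages are free */
    		return EXIT_FAILURE;
    	}

    	memset(addr, 0, LENGTH);	/* touch the mapping */
    	munmap(addr, LENGTH);
    	return EXIT_SUCCESS;
    }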
D | transhuge.rst |
    12: that supports the automatic promotion and demotion of page sizes and
    19: in the examples below we presume that the basic page size is 4K and
    20: the huge page size is 2M, although the actual numbers may vary
    26: requiring larger clear-page copy-page in page faults which is a
    28: single page fault for each 2M virtual region touched by userland (so
    43: larger size only if both KVM and the Linux guest are using
    48: Modern kernels support "multi-size THP" (mTHP), which introduces the
    49: ability to allocate memory in blocks that are bigger than a base page
    50: but smaller than traditional PMD-size (as described above), in
    51: increments of a power-of-2 number of pages. mTHP can back anonymous
    [all …]
|
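The transhuge.rst excerpt assumes 4K base pages and 2M huge pages. A small user-space sketch of opting a region into THP with madvise(MADV_HUGEPAGE) is shown below; it assumes THP is enabled in "madvise" or "always" mode, and the hint gives no guarantee that huge pages will actually be used.

    #include <stdlib.h>
    #include <sys/mman.h>

    #define HPAGE	(2UL * 1024 * 1024)	/* 2M, as in the document's examples */
    #define REGION	(16UL * HPAGE)

    int main(void)
    {
    	/* aligned_alloc() gives the 2M alignment a PMD mapping needs */
    	char *buf = aligned_alloc(HPAGE, REGION);
    	if (!buf)
    		return EXIT_FAILURE;

    	madvise(buf, REGION, MADV_HUGEPAGE);	/* a hint, not a guarantee */

    	/* the first write to each 2M chunk can now fault in a whole huge page */
    	for (size_t off = 0; off < REGION; off += HPAGE)
    		buf[off] = 1;

    	free(buf);
    	return EXIT_SUCCESS;
    }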
/linux-6.15/mm/ |
D | dmapool.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    9:  * This allocator returns small blocks of a given size which are DMA-able by
    10:  * the given device. It uses the dma_alloc_coherent page allocator to get
    11:  * new pages, then splits them up into blocks of the required size.
    15:  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
    16:  * allocated pages. Each page in the page_list is split into blocks of at
    17:  * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
    19:  * keep a count of how many are currently allocated from each page.
    23: #include <linux/dma-mapping.h>
    56: unsigned int size;
    [all …]
|
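The dmapool.c header comment describes a pool that carves coherent pages into fixed-size, properly aligned blocks. A hedged driver-side sketch of the public dma_pool API follows; struct my_desc and my_desc_ring_init() are hypothetical, and the sizes are examples only.

    #include <linux/dmapool.h>
    #include <linux/device.h>
    #include <linux/types.h>

    struct my_desc {			/* hypothetical 64-byte hardware descriptor */
    	__le64 addr;
    	__le32 len;
    	__le32 flags;
    	u8 pad[48];
    };

    static int my_desc_ring_init(struct device *dev)
    {
    	struct dma_pool *pool;
    	dma_addr_t dma;
    	void *desc;

    	/* blocks of sizeof(struct my_desc), 64-byte aligned, cut from coherent pages */
    	pool = dma_pool_create("my_desc", dev, sizeof(struct my_desc), 64, 0);
    	if (!pool)
    		return -ENOMEM;

    	desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
    	if (!desc) {
    		dma_pool_destroy(pool);
    		return -ENOMEM;
    	}

    	/* ... hand 'dma' to the device, use 'desc' from the CPU ... */

    	dma_pool_free(pool, desc, dma);
    	dma_pool_destroy(pool);
    	return 0;
    }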
/linux-6.15/drivers/vdpa/vdpa_user/ |
D | iova_domain.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    3:  * MMU-based software IOTLB.
    5:  * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
    30: return -ENOMEM;
    32: map_file->file = get_file(file);
    33: map_file->offset = offset;
    35: ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
    38: fput(map_file->file);
    51: while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
    52: map_file = (struct vdpa_map_file *)map->opaque;
    [all …]
|
/linux-6.15/drivers/android/ |
D | binder_alloc.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    6:  * Copyright (C) 2007-2017 Google, Inc.
    52: return list_entry(buffer->entry.next, struct binder_buffer, entry);
    57: return list_entry(buffer->entry.prev, struct binder_buffer, entry);
    63: if (list_is_last(&buffer->entry, &alloc->buffers))
    64: return alloc->vm_start + alloc->buffer_size - buffer->user_data;
    65: return binder_buffer_next(buffer)->user_data - buffer->user_data;
    71: struct rb_node **p = &alloc->free_buffers.rb_node;
    77: BUG_ON(!new_buffer->free);
    82: "%d: add free buffer, size %zd, at %pK\n",
    [all …]
|
/linux-6.15/arch/powerpc/include/asm/nohash/32/ |
D | mmu-8xx.h |
    1: /* SPDX-License-Identifier: GPL-2.0 */
    10:  * During software tablewalk, the registers used perform mask/shift-add
    34:  * Then we use the APG to say whether accesses are according to Page rules or
    39:  * 0 => Kernel          => 11 (all accesses performed according as user iaw page definition)
    40:  * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
    41:  * 2 => User            => 11 (all accesses performed according as user iaw page definition)
    42:  * 3 => User+Accessed   => 10 (all accesses performed according to swaped page definition) for KUEP
    43:  * 4-15 => Not Used
    47: /* The effective page number register. When read, contains the information
    52: #define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
    [all …]
|
/linux-6.15/drivers/misc/ |
D | vmw_balloon.c |
    1: // SPDX-License-Identifier: GPL-2.0
    5:  * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
    46: …"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performanc…
    54: /* Magic number for the balloon mount-point */
    80: #define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
    83:  * 64-bit targets are only supported in 64-bit
    118:  * enum vmballoon_cmd_type - backdoor commands.
    140:  * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
    141:  * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
    142:  * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
    [all …]
|
/linux-6.15/Documentation/mm/ |
D | vmemmap_dedup.rst |
    2: .. SPDX-License-Identifier: GPL-2.0
    13: The ``struct page`` structures are used to describe a physical page frame. By
    14: default, there is a one-to-one mapping from a page frame to its corresponding
    15: ``struct page``.
    17: HugeTLB pages consist of multiple base page size pages and is supported by many
    18: architectures. See Documentation/admin-guide/mm/hugetlbpage.rst for more
    19: details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB are
    20: currently supported. Since the base page size on x86 is 4KB, a 2MB HugeTLB page
    21: consists of 512 base pages and a 1GB HugeTLB page consists of 262144 base pages.
    22: For each base page, there is a corresponding ``struct page``.
    [all …]
|
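The numbers in the vmemmap_dedup.rst excerpt (512 base pages per 2MB HugeTLB page, 262144 per 1GB page) translate directly into vmemmap cost. The quick arithmetic check below assumes a 64-byte struct page, the usual x86-64 size, and is purely illustrative.

    #include <stdio.h>

    int main(void)
    {
    	unsigned long base   = 4096UL;		/* base page size      */
    	unsigned long huge2m = 2UL << 20;	/* 2M HugeTLB page     */
    	unsigned long huge1g = 1UL << 30;	/* 1G HugeTLB page     */
    	unsigned long sp     = 64;		/* sizeof(struct page) */

    	printf("2M page: %lu base pages, %lu KiB of vmemmap\n",
    	       huge2m / base, huge2m / base * sp / 1024);		/* 512, 32 KiB */
    	printf("1G page: %lu base pages, %lu MiB of vmemmap\n",
    	       huge1g / base, huge1g / base * sp / (1024 * 1024));	/* 262144, 16 MiB */
    	return 0;
    }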
/linux-6.15/include/linux/ |
D | kasan.h |
    1: /* SPDX-License-Identifier: GPL-2.0 */
    6: #include <linux/kasan-enabled.h>
    7: #include <linux/kasan-tags.h>
    13: struct page;
    32: #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */
    69: int kasan_add_zero_shadow(void *start, unsigned long size);
    70: void kasan_remove_zero_shadow(void *start, unsigned long size);
    80: static inline int kasan_add_zero_shadow(void *start, unsigned long size)
    85: unsigned long size)
    105: void __kasan_unpoison_range(const void *addr, size_t size);
    [all …]
|
D | dma-map-ops.h |
    1: /* SPDX-License-Identifier: GPL-2.0 */
    9: #include <linux/dma-mapping.h>
    17: void *(*alloc)(struct device *dev, size_t size,
    20: void (*free)(struct device *dev, size_t size, void *vaddr,
    22: struct page *(*alloc_pages_op)(struct device *dev, size_t size,
    25: void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
    31: void *cpu_addr, dma_addr_t dma_addr, size_t size,
    34: dma_addr_t (*map_page)(struct device *dev, struct page *page,
    35: unsigned long offset, size_t size,
    38: size_t size, enum dma_data_direction dir,
    [all …]
|
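The dma-map-ops.h excerpt lists a few of the callback slots in struct dma_map_ops. The skeleton below shows how a bus or architecture layer might wire up the slots visible above; the callback bodies are placeholders, the signatures follow recent kernels, and the my_dma_* names are hypothetical, so this is a sketch rather than a working DMA implementation.

    #include <linux/dma-map-ops.h>

    static void *my_dma_alloc(struct device *dev, size_t size,
    			  dma_addr_t *dma_handle, gfp_t gfp,
    			  unsigned long attrs)
    {
    	/* allocate 'size' bytes reachable by 'dev', report the bus address */
    	return NULL;	/* placeholder */
    }

    static void my_dma_free(struct device *dev, size_t size, void *vaddr,
    			dma_addr_t dma_handle, unsigned long attrs)
    {
    	/* undo my_dma_alloc() */
    }

    static dma_addr_t my_dma_map_page(struct device *dev, struct page *page,
    				  unsigned long offset, size_t size,
    				  enum dma_data_direction dir,
    				  unsigned long attrs)
    {
    	/* make [page + offset, +size) visible to the device */
    	return DMA_MAPPING_ERROR;	/* placeholder */
    }

    static const struct dma_map_ops my_dma_ops = {
    	.alloc		= my_dma_alloc,
    	.free		= my_dma_free,
    	.map_page	= my_dma_map_page,
    };

In real code the ops table would be attached to devices on the bus, typically via set_dma_ops(), and would fill in the remaining callbacks (sg mapping, sync, mmap, and so on).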
/linux-6.15/arch/powerpc/mm/ |
D | dma-noncoherent.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    13: #include <linux/dma-direct.h>
    14: #include <linux/dma-map-ops.h>
    22: static void __dma_sync(void *vaddr, size_t size, int direction)
    25: unsigned long end = start + size;
    32:  * invalidate only when cache-line aligned otherwise there is
    35: if ((start | end) & (L1_CACHE_BYTES - 1))
    52:  * In this case, each page of a buffer must be kmapped/kunmapped
    57:  * beyond the first page.
    59: static inline void __dma_sync_page_highmem(struct page *page,
    [all …]
|
/linux-6.15/fs/hfsplus/ |
D | bitmap.c |
    1: // SPDX-License-Identifier: GPL-2.0
    19: int hfsplus_block_allocate(struct super_block *sb, u32 size,
    23: struct page *page;
    32: return size;
    34: hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
    35: mutex_lock(&sbi->alloc_mutex);
    36: mapping = sbi->alloc_file->i_mapping;
    37: page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
    38: if (IS_ERR(page)) {
    39: start = size;
    [all …]
|
/linux-6.15/sound/pci/trident/ |
D | trident_memory.c |
    1: // SPDX-License-Identifier: GPL-2.0-or-later
    7:  * Trident 4DWave-NX memory page allocation (TLB area)
    19: /* page arguments of these two macros are Trident page (4096 bytes), not like
    22: #define __set_tlb_bus(trident,page,addr) \
    23: (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
    24: #define __tlb_to_addr(trident,page) \
    25: (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
    28: /* page size == SNDRV_TRIDENT_PAGE_SIZE */
    29: #define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
    31: /* fill TLB entrie(s) corresponding to page with ptr */
    [all …]
|
/linux-6.15/kernel/module/ |
D | decompress.c |
    1: // SPDX-License-Identifier: GPL-2.0-or-later
    19: struct page **new_pages;
    21: new_pages = kvmalloc_array(info->max_pages + extent,
    22: sizeof(info->pages), GFP_KERNEL);
    24: return -ENOMEM;
    26: memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));
    27: kvfree(info->pages);
    28: info->pages = new_pages;
    29: info->max_pages += extent;
    34: static struct page *module_get_next_page(struct load_info *info)
    [all …]
|
/linux-6.15/lib/ |
D | iov_iter.c |
    1: // SPDX-License-Identifier: GPL-2.0-only
    4: #include <linux/fault-inject-usercopy.h>
    78:  * fault_in_iov_iter_readable - fault in iov iterator for reading
    80:  * @size: maximum length
    83:  * @size. For each iovec, fault in each page that constitutes the iovec.
    88:  * Always returns 0 for non-userspace iterators.
    90: size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
    93: size_t n = min(size, iov_iter_count(i));
    94: n -= fault_in_readable(i->ubuf + i->iov_offset, n);
    95: return size - n;
    [all …]
|
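The kernel-doc excerpted above describes pre-faulting the user buffer before a copy. The sketch below shows the usual caller pattern (as in filesystem write paths): fault in, copy with page faults disabled, and retry on a short copy. The helper name my_copy_from_user_iter() is hypothetical, and real callers wrap this in their own locking and buffer management.

    #include <linux/uio.h>
    #include <linux/uaccess.h>

    static ssize_t my_copy_from_user_iter(void *dst, size_t len, struct iov_iter *i)
    {
    	size_t copied = 0;

    	while (copied < len) {
    		size_t want = len - copied;
    		size_t n;

    		/* returns the number of bytes that could NOT be faulted in */
    		if (fault_in_iov_iter_readable(i, want) == want)
    			break;			/* nothing usable, give up */

    		pagefault_disable();
    		n = copy_from_iter(dst + copied, want, i);
    		pagefault_enable();

    		copied += n;
    		if (!n)				/* page went away again, re-fault */
    			continue;
    	}
    	return copied ? copied : -EFAULT;
    }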
/linux-6.15/drivers/gpu/drm/imagination/ |
D | pvr_mmu.c |
    1: // SPDX-License-Identifier: GPL-2.0-only OR MIT
    17: #include <linux/dma-mapping.h>
    23: #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
    26:  * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
    27:  * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
    28:  * ensures that the selected host page size corresponds to a valid device page
    29:  * size and sets up values needed by the MMU code below.
    56: # error Unsupported device page size PVR_DEVICE_PAGE_SIZE
    61: (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
    64: PVR_MMU_SYNC_LEVEL_NONE = -1,
    [all …]
|
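PVR_MASK_FROM_SIZE() in the excerpt above turns a power-of-two size into an address mask that clears the in-page offset bits. A tiny stand-alone evaluation for a 4K device page is shown below; U64_C is redefined locally, so this is an illustration rather than driver code.

    #include <stdint.h>
    #include <stdio.h>

    #define U64_C(x) UINT64_C(x)
    #define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))

    int main(void)
    {
    	/* 0x1000 -> 0xfffffffffffff000 */
    	printf("%#llx\n",
    	       (unsigned long long)PVR_MASK_FROM_SIZE(U64_C(0x1000)));
    	return 0;
    }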