
Searched +full:page +full:- +full:size (Results 1 – 25 of 1066) sorted by relevance


/linux-5.10/arch/arm/mm/
dma-mapping.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
33 #include <asm/dma-iommu.h>
36 #include <xen/swiotlb-xen.h>
43 size_t size; member
53 size_t size; member
55 struct page *page; member
[all …]
/linux-5.10/kernel/dma/
direct.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Christoph Hellwig.
10 #include <linux/dma-map-ops.h>
21 * override the variable below for dma-direct to work properly.
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page()
41 phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT; in dma_direct_get_required_mask()
44 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; in dma_direct_get_required_mask()
50 u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
68 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
74 return dma_addr + size - 1 <= in dma_coherent_ok()
[all …]
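An aside on the expression at line 44 of direct.c above: fls64() returns the 1-based index of the highest set bit, so (1ULL << (fls64(max_dma) - 1)) * 2 - 1 is an all-ones mask just wide enough to cover the highest DMA'able address. A minimal userspace model of that rounding (a sketch; a GCC builtin stands in for the kernel's fls64()):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t required_mask(uint64_t max_dma)
    {
        int fls = 64 - __builtin_clzll(max_dma); /* fls64() equivalent, max_dma != 0 */
        return (1ULL << (fls - 1)) * 2 - 1;
    }

    int main(void)
    {
        /* highest bit of 0x123456789 is bit 32, so the mask is 0x1ffffffff */
        printf("%#llx\n", (unsigned long long)required_mask(0x123456789ULL));
        return 0;
    }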
ops_helpers.c
1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-map-ops.h>
9 * Create scatter-list for the already allocated DMA buffer.
12 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_get_sgtable() argument
15 struct page *page = virt_to_page(cpu_addr); in dma_common_get_sgtable() local
20 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_common_get_sgtable()
25 * Create userspace mapping for the DMA-coherent memory.
28 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_mmap() argument
33 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_common_mmap()
34 unsigned long off = vma->vm_pgoff; in dma_common_mmap()
[all …]
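For context on dma_common_mmap() above: drivers normally reach it through dma_mmap_coherent(). A hedged sketch of such a caller (struct mydrv and its fields are hypothetical; only the dma_mmap_coherent() call is the real API):

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    struct mydrv {
        struct device *dev;
        void *cpu_addr;        /* from dma_alloc_coherent() */
        dma_addr_t dma_handle;
        size_t size;
    };

    /* .mmap file operation: hand the coherent buffer to userspace */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
        struct mydrv *md = file->private_data;

        return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
                                 md->dma_handle, md->size);
    }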
pool.c
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/dma-map-ops.h>
9 #include <linux/dma-direct.h>
23 /* Size can be defined by the coherent_pool command line */
49 static void dma_atomic_pool_size_add(gfp_t gfp, size_t size) in dma_atomic_pool_size_add() argument
52 pool_size_dma += size; in dma_atomic_pool_size_add()
54 pool_size_dma32 += size; in dma_atomic_pool_size_add()
56 pool_size_kernel += size; in dma_atomic_pool_size_add()
61 unsigned long size; in cma_in_zone() local
69 size = cma_get_size(cma); in cma_in_zone()
[all …]
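The "coherent_pool command line" mentioned at line 23 of pool.c is the documented boot parameter that sizes the atomic DMA pools, e.g. appended to the kernel command line:

    coherent_pool=4M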
/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
41 const struct nvkm_vmm_page *page) in nvkm_vmm_pt_new() argument
43 const u32 pten = 1 << desc->bits; in nvkm_vmm_pt_new()
47 if (desc->type > PGT) { in nvkm_vmm_pt_new()
48 if (desc->type == SPT) { in nvkm_vmm_pt_new()
49 const struct nvkm_vmm_desc *pair = page[-1].desc; in nvkm_vmm_pt_new()
50 lpte = pten >> (desc->bits - pair->bits); in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
59 pgt->sparse = sparse; in nvkm_vmm_pt_new()
61 if (desc->type == PGD) { in nvkm_vmm_pt_new()
[all …]
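A worked model of the arithmetic at lines 43 and 50 above, with illustrative bit-widths (not nouveau's actual values): a page table holds 1 << desc->bits entries, and when small-page and large-page tables pair up, each large PTE spans 1 << (desc->bits - pair->bits) small PTEs.

    #include <stdio.h>

    int main(void)
    {
        unsigned int desc_bits = 8, pair_bits = 5;  /* hypothetical widths */
        unsigned int pten = 1u << desc_bits;        /* 256 small PTEs */
        unsigned int lpte = pten >> (desc_bits - pair_bits);

        printf("%u small PTEs map to %u large-page slots\n", pten, lpte);
        return 0;
    }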
/linux-5.10/arch/arm64/include/asm/
kvm_pgtable.h
1 // SPDX-License-Identifier: GPL-2.0-only
17 * struct kvm_pgtable - KVM page-table.
18 * @ia_bits: Maximum input address size, in bits.
19 * @start_level: Level at which the page-table walk starts.
20 * @pgd: Pointer to the first top-level entry of the page-table.
21 * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
28 /* Stage-2 only */
33 * enum kvm_pgtable_prot - Page-table permissions and attributes.
53 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
73 * struct kvm_pgtable_walker - Hook into a page-table walk.
[all …]
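To make the walker hook concrete, here is a hedged sketch of its typical use (modeled on callers elsewhere in arch/arm64/kvm; the my_-prefixed names are hypothetical): supply a visitor callback plus flags, then run a depth-first walk over an address range.

    #include <asm/kvm_pgtable.h>

    /* visitor: invoked once per valid leaf entry during the walk */
    static int my_count_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                             enum kvm_pgtable_walk_flags flag, void * const arg)
    {
        unsigned long *leaves = arg;

        (*leaves)++;
        return 0;  /* a non-zero return aborts the walk */
    }

    static int my_count_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size,
                               unsigned long *leaves)
    {
        struct kvm_pgtable_walker walker = {
            .cb    = my_count_leaf,
            .arg   = leaves,
            .flags = KVM_PGTABLE_WALK_LEAF,
        };

        return kvm_pgtable_walk(pgt, addr, size, &walker);
    }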
/linux-5.10/mm/
dmapool.c
1 // SPDX-License-Identifier: GPL-2.0-only
9 * This allocator returns small blocks of a given size which are DMA-able by
10 * the given device. It uses the dma_alloc_coherent page allocator to get
11 * new pages, then splits them up into blocks of the required size.
15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
16 * allocated pages. Each page in the page_list is split into blocks of at
17 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
18 * list of free blocks within the page. Used blocks aren't tracked, but we
19 * keep a count of how many are currently allocated from each page.
23 #include <linux/dma-mapping.h>
[all …]
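The comment block above is essentially the whole contract of the dmapool API; a hedged usage sketch (device pointer, block size and alignment are placeholders):

    #include <linux/dmapool.h>

    static int mydrv_use_pool(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        /* 64-byte blocks, 64-byte aligned, carved out of whole pages */
        pool = dma_pool_create("mydrv-desc", dev, 64, 64, 0);
        if (!pool)
            return -ENOMEM;

        /* 'desc' is the CPU view, 'dma' the bus address for the device */
        desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (!desc) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);
        return 0;
    }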
readahead.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/readahead.c - address_space-level file readahead.
16 #include <linux/backing-dev.h>
23 #include <linux/blk-cgroup.h>
36 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
37 ra->prev_pos = -1; in file_ra_state_init()
42 * see if a page needs releasing upon read_cache_pages() failure
43 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
49 struct page *page) in read_cache_pages_invalidate_page() argument
51 if (page_has_private(page)) { in read_cache_pages_invalidate_page()
[all …]
zsmalloc.c
10 * Released under the terms of 3-clause BSD License
16 * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->freelist(index): links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
23 * page->units: first object offset in a subpage of zspage
25 * Usage of struct page flags:
26 * PG_private: identifies the first component page
27 * PG_owner_priv_1: identifies the huge component page
[all …]
slob.c
1 // SPDX-License-Identifier: GPL-2.0
14 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
17 * and within each page, there is a singly-linked list of free blocks
22 * Allocation from heap involves first searching for a page with
23 * sufficient free blocks (using a next-fit-like approach) followed by
24 * a first-fit scan of the page. Deallocation inserts objects back
26 * address-ordered first fit.
29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
31 * alloc_pages() directly, allocating compound pages so the page order
38 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
[all …]
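As an illustration of the first-fit free-list idea described above (a deliberately minimal userspace model, far simpler than mm/slob.c itself; sizes are assumed pointer-aligned and freeing is not modeled): free blocks form a singly-linked list, and allocation takes the first block that fits, splitting off any usable remainder.

    #include <stddef.h>

    struct free_block {
        size_t size;                 /* bytes available in this block */
        struct free_block *next;
    };

    static struct free_block *free_list;

    static void *ff_alloc(size_t size)
    {
        struct free_block **prev = &free_list;
        struct free_block *b;

        for (b = free_list; b; prev = &b->next, b = b->next) {
            if (b->size < size)
                continue;  /* too small, keep scanning */
            if (b->size >= size + sizeof(struct free_block)) {
                /* split: the tail stays on the free list */
                struct free_block *rest =
                    (struct free_block *)((char *)b + size);
                rest->size = b->size - size;
                rest->next = b->next;
                *prev = rest;
            } else {
                *prev = b->next;  /* consume the whole block */
            }
            return b;
        }
        return NULL;  /* no fit: slob would allocate a fresh page here */
    }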
slub.c
1 // SPDX-License-Identifier: GPL-2.0
32 #include <linux/fault-inject.h>
45 * 2. node->list_lock
46 * 3. slab_lock(page) (Only on some arches and for debugging)
55 * A. page->freelist -> List of object free in a page
56 * B. page->inuse -> Number of objects in use
57 * C. page->objects -> Number of objects in page
58 * D. page->frozen -> frozen state
62 * slab is the one who can perform list operations on the page. Other
65 * page's freelist.
[all …]
slab.h
1 /* SPDX-License-Identifier: GPL-2.0 */
21 unsigned int object_size;/* The original size of the object */
22 unsigned int size; /* The aligned/padded/added on size */ member
26 unsigned int usersize; /* Usercopy region size */
44 #include <linux/fault-inject.h>
61 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
80 unsigned int size; member
88 /* Find the kmalloc slab corresponding for a certain size */
97 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
101 unsigned int size, slab_flags_t flags,
[all …]
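The declarations above are the internal side of the cache machinery; the public face is kmem_cache_create() and friends. A hedged sketch (struct myobj is a placeholder):

    #include <linux/init.h>
    #include <linux/slab.h>

    struct myobj {
        int id;
        char name[32];
    };

    static struct kmem_cache *myobj_cache;

    static int __init myobj_init(void)
    {
        struct myobj *obj;

        myobj_cache = kmem_cache_create("myobj", sizeof(struct myobj),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!myobj_cache)
            return -ENOMEM;

        obj = kmem_cache_alloc(myobj_cache, GFP_KERNEL);
        if (obj)
            kmem_cache_free(myobj_cache, obj);

        return 0;
    }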
slab.c
1 // SPDX-License-Identifier: GPL-2.0
7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
9 * Major cleanup, different bufctl logic, per-cpu arrays
17 * Pub: Prentice Hall ISBN 0-13-101908-2
19 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
26 * page long) and always contiguous), and each slab contains multiple
48 * Each cache has a short per-cpu head array, most allocs
54 * The c_cpuarray may not be read with enabled local interrupts -
61 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
62 * and local interrupts are disabled so slab code is preempt-safe.
[all …]
/linux-5.10/sound/pci/emu10k1/
memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \ argument
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \ argument
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30 /* get aligned page from offset address */
32 /* get offset address from aligned page */
[all …]
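A userspace model of the __set_ptb_entry() packing above (illustration only): the bus address, shifted left by the chip's address mode, is OR'ed with the Emu page index before the little-endian store.

    #include <stdint.h>

    static uint32_t ptb_entry(uint32_t addr, unsigned int address_mode,
                              uint32_t page)
    {
        /* cpu_to_le32() would apply on the actual store */
        return (addr << address_mode) | page;
    }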
/linux-5.10/include/linux/
dma-map-ops.h
1 /* SPDX-License-Identifier: GPL-2.0 */
9 #include <linux/dma-mapping.h>
15 void *(*alloc)(struct device *dev, size_t size,
18 void (*free)(struct device *dev, size_t size, void *vaddr,
20 struct page *(*alloc_pages)(struct device *dev, size_t size,
23 void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
25 void *(*alloc_noncoherent)(struct device *dev, size_t size,
28 void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
34 void *cpu_addr, dma_addr_t dma_addr, size_t size,
37 dma_addr_t (*map_page)(struct device *dev, struct page *page,
[all …]
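A hedged sketch of how such an ops table is filled in and attached (the my_* names are hypothetical stubs; only the field names and set_dma_ops() come from the header):

    #include <linux/dma-map-ops.h>

    static void *my_alloc(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t gfp,
                          unsigned long attrs)
    {
        return NULL;  /* stub: would allocate and fill *dma_handle */
    }

    static void my_free(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle, unsigned long attrs)
    {
        /* stub: would unmap and release the buffer */
    }

    static const struct dma_map_ops my_dma_ops = {
        .alloc = my_alloc,
        .free  = my_free,
    };

    /* a bus or IOMMU layer would then route a device through the table */
    static void my_attach(struct device *dev)
    {
        set_dma_ops(dev, &my_dma_ops);
    }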
buffer_head.h
1 /* SPDX-License-Identifier: GPL-2.0 */
46 struct page;
53 * within a page, and of course as the unit of I/O through the
57 * a page (via a page_mapping) and for wrapping bio submission
62 struct buffer_head *b_this_page;/* circular list of page's buffers */
63 struct page *b_page; /* the page this bh is mapped to */
66 size_t b_size; /* size of mapping */
67 char *b_data; /* pointer to data within the page */
76 spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
78 * buffers in the page */
[all …]
/linux-5.10/drivers/staging/gasket/
gasket_page_table.h
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Gasket Page Table functionality. This file describes the address
5 * As much as possible, internal details are hidden to simplify use -
6 * all calls are thread-safe (protected by an internal mutex) except where
29 * @ppage_table: Pointer to Gasket page table pointer. Set by this call.
42 * Description: Allocates and initializes data to track address translation -
43 * simple and extended page table metadata. Initially, the page table is
44 * partitioned such that all addresses are "simple" (single-level lookup).
55 * Deallocate and cleanup page table data.
56 * @page_table: Gasket page table pointer.
[all …]
/linux-5.10/Documentation/admin-guide/mm/
hugetlbpage.rst
11 the Linux kernel. This support is built on top of multiple page size support
13 support 4K and 2M (1G if architecturally supported) page sizes, ia64
14 architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
15 256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
21 Users can use the huge page support in Linux kernel by either using the mmap
30 persistent hugetlb pages in the kernel's huge page pool. It also displays
31 default huge page size and information about the number of free, reserved
32 and surplus huge pages in the pool of huge pages of default size.
33 The huge page size is needed for generating the proper alignment and
34 size of the arguments to system calls that map huge page regions.
[all …]
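The mmap route mentioned above, as a hedged userspace example: map one default-size huge page anonymously with MAP_HUGETLB (assumes free pages in the huge page pool; 2M is the common x86 default).

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000  /* fallback for older libc headers */
    #endif

    #define LENGTH (2UL * 1024 * 1024)  /* one 2M huge page */

    int main(void)
    {
        void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (addr == MAP_FAILED) {
            perror("mmap");  /* typically: pool empty or no hugetlb support */
            return EXIT_FAILURE;
        }

        ((char *)addr)[0] = 1;  /* touch the page to fault it in */
        munmap(addr, LENGTH);
        return 0;
    }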
/linux-5.10/fs/ntfs/
logfile.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * logfile.c - NTFS kernel journal handling. Part of the Linux-NTFS project.
5 * Copyright (c) 2002-2007 Anton Altaparmakov
27 * ntfs_check_restart_page_header - check the page header for consistency
28 * @vi: $LogFile inode to which the restart page header belongs
29 * @rp: restart page header to check
30 * @pos: position in @vi at which the restart page header resides
32 * Check the restart page header @rp for consistency and return 'true' if it is
36 * require the full restart page.
47 * If the system or log page sizes are smaller than the ntfs block size in ntfs_check_restart_page_header()
[all …]
/linux-5.10/fs/hfsplus/
bitmap.c
1 // SPDX-License-Identifier: GPL-2.0
19 int hfsplus_block_allocate(struct super_block *sb, u32 size, in hfsplus_block_allocate() argument
23 struct page *page; in hfsplus_block_allocate() local
32 return size; in hfsplus_block_allocate()
34 hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); in hfsplus_block_allocate()
35 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate()
36 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate()
37 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate()
38 if (IS_ERR(page)) { in hfsplus_block_allocate()
39 start = size; in hfsplus_block_allocate()
[all …]
/linux-5.10/arch/powerpc/mm/
dma-noncoherent.c
1 // SPDX-License-Identifier: GPL-2.0-only
13 #include <linux/dma-direct.h>
14 #include <linux/dma-map-ops.h>
22 static void __dma_sync(void *vaddr, size_t size, int direction) in __dma_sync() argument
25 unsigned long end = start + size; in __dma_sync()
32 * invalidate only when cache-line aligned otherwise there is in __dma_sync()
35 if ((start | end) & (L1_CACHE_BYTES - 1)) in __dma_sync()
52 * In this case, each page of a buffer must be kmapped/kunmapped
57 * beyond the first page.
59 static inline void __dma_sync_page_highmem(struct page *page, in __dma_sync_page_highmem() argument
[all …]
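The test at line 35 above deserves a gloss: OR'ing the two endpoints leaves a low bit set iff either end of the buffer is not cache-line aligned, which is when invalidation alone would be unsafe. A userspace model (the line size is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define L1_CACHE_BYTES 32  /* illustrative cache-line size */

    static bool endpoint_unaligned(uintptr_t start, size_t size)
    {
        uintptr_t end = start + size;

        return ((start | end) & (L1_CACHE_BYTES - 1)) != 0;
    }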
/linux-5.10/arch/powerpc/include/asm/nohash/32/
mmu-8xx.h
1 /* SPDX-License-Identifier: GPL-2.0 */
10 * During software tablewalk, the registers used perform mask/shift-add
34 * Then we use the APG to say whether accesses are according to Page rules or
39 * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
40 * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
41 * 2 => User => 11 (all accesses performed according as user iaw page definition)
42 * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
43 * => 10 (all accesses performed according to swapped page definition) for KUEP
44 * 4-15 => Not Used
49 /* The effective page number register. When read, contains the information
[all …]
/linux-5.10/drivers/iommu/
dma-iommu.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
13 #include <linux/dma-map-ops.h>
14 #include <linux/dma-iommu.h>
43 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
54 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) in cookie_msi_granule()
55 return cookie->iovad.granule; in cookie_msi_granule()
[all …]
/linux-5.10/fs/ufs/
util.c
1 // SPDX-License-Identifier: GPL-2.0
20 struct super_block *sb, u64 fragment, u64 size) in _ubh_bread_() argument
25 if (size & ~uspi->s_fmask) in _ubh_bread_()
27 count = size >> uspi->s_fshift; in _ubh_bread_()
33 ubh->fragment = fragment; in _ubh_bread_()
34 ubh->count = count; in _ubh_bread_()
36 if (!(ubh->bh[i] = sb_bread(sb, fragment + i))) in _ubh_bread_()
39 ubh->bh[i] = NULL; in _ubh_bread_()
43 brelse (ubh->bh[j]); in _ubh_bread_()
49 struct super_block *sb, u64 fragment, u64 size) in ubh_bread_uspi() argument
[all …]
/linux-5.10/mm/kasan/
common.c
1 // SPDX-License-Identifier: GPL-2.0
3 * This file contains common generic and tag-based KASAN code.
8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
55 track->pid = current->pid; in kasan_set_track()
56 track->stack = kasan_save_stack(flags); in kasan_set_track()
61 current->kasan_depth++; in kasan_enable_current()
66 current->kasan_depth--; in kasan_disable_current()
69 bool __kasan_check_read(const volatile void *p, unsigned int size) in __kasan_check_read() argument
71 return check_memory_region((unsigned long)p, size, false, _RET_IP_); in __kasan_check_read()
75 bool __kasan_check_write(const volatile void *p, unsigned int size) in __kasan_check_write() argument
[all …]
