
Searched full:page (Results 1 – 25 of 5536) sorted by relevance


/linux-5.10/include/linux/
page_ref.h
7 #include <linux/page-flags.h>
29 extern void __page_ref_set(struct page *page, int v);
30 extern void __page_ref_mod(struct page *page, int v);
31 extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
32 extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
33 extern void __page_ref_mod_unless(struct page *page, int v, int u);
34 extern void __page_ref_freeze(struct page *page, int v, int ret);
35 extern void __page_ref_unfreeze(struct page *page, int v);
41 static inline void __page_ref_set(struct page *page, int v) in __page_ref_set() argument
44 static inline void __page_ref_mod(struct page *page, int v) in __page_ref_mod() argument
[all …]
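As a hedged sketch of what these hooks are for: each page_ref_*() helper in this header pairs the atomic operation on page->_refcount with the matching __page_ref_*() call when the debug tracepoint is active (CONFIG_DEBUG_PAGE_REF). The body below follows the shape of page_ref_inc(); the _sketch name is ours:

#include <linux/page_ref.h>

static inline void page_ref_inc_sketch(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);	/* report delta v = +1 */
}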
page-flags.h
3 * Macros for manipulating and testing page->flags
18 * Various page->flags bits:
20 * PG_reserved is set for special pages. The "struct page" of such a page
25 * - Pages reserved or allocated early during boot (before the page allocator
27 * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
29 * be given to the page allocator.
32 * - The zero page(s)
33 * - Pages not added to the page allocator when onlining a section because
49 * Consequently, PG_reserved for a page mapped into user space can indicate
50 * the zero page, the vDSO, MMIO pages or device memory.
[all …]
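Because PG_reserved marks memory the rest of MM must leave alone, callers test it before treating a struct page as ordinary RAM. A minimal sketch using the PageReserved() accessor this header generates (the helper name is hypothetical):

#include <linux/page-flags.h>

/* true only for pages the page allocator actually manages */
static bool page_is_plain_ram(struct page *page)
{
	/* the zero page, early boot allocations, MMIO, ... are off-limits */
	return !PageReserved(page);
}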
balloon_compaction.h
7 * Balloon page migration makes use of the general non-lru movable page
10 * page->private is used to reference the responsible balloon device.
11 * page->mapping is used in context of non-lru page migration to reference
12 * the address space operations for page isolation/migration/compaction.
14 * As the page isolation scanning step a compaction thread does is a lockless
15 * procedure (from a page standpoint), it might bring some racy situations while
16 * performing balloon page compaction. In order to sort out these racy scenarios
17 * and safely perform balloon's page compaction and migration we must, always,
20 * i. when updating a balloon's page ->mapping element, strictly do it under
23 * +-page_lock(page);
[all …]
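Rule (i) is what the real enqueue path implements: the page lock and b_dev_info->pages_lock together bracket every ->mapping update, so the lockless compaction scan never sees a half-formed balloon page. A condensed sketch under those assumptions (balloon_page_insert() is the helper from this header; error handling trimmed):

#include <linux/balloon_compaction.h>
#include <linux/pagemap.h>

static void balloon_enqueue_sketch(struct balloon_dev_info *b_dev_info,
				   struct page *page)
{
	unsigned long flags;

	BUG_ON(!trylock_page(page));	/* freshly allocated, must succeed */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);	/* sets ->mapping, links ->lru */
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}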
pagemap.h
158 void release_pages(struct page **pages, int nr);
161 * speculatively take a reference to a page.
162 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
166 * been used to lookup the page in the pagecache radix-tree (or page table):
172 * page has been finished with, no matter what it is subsequently allocated
179 * 1. find page in radix tree
181 * 3. check the page is still in pagecache (if no, goto 1)
186 * B. remove page from pagecache
187 * C. free the page
192 * subsequently, B will complete and 1 will find no page, causing the
[all …]
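Steps 1-3 compose into the classic speculative-lookup loop. A condensed sketch built on page_cache_get_speculative() from this header (shadow entries are simply skipped here; the real find_get_entry() handles them):

#include <linux/pagemap.h>

static struct page *find_get_page_sketch(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page;
repeat:
	rcu_read_lock();
	page = xa_load(&mapping->i_pages, index);	/* 1. find page */
	if (page && !xa_is_value(page)) {
		if (!page_cache_get_speculative(page)) {  /* 2. take ref */
			rcu_read_unlock();
			goto repeat;		/* freed under us, retry */
		}
		/* 3. still in the pagecache at this index? */
		if (unlikely(page != xa_load(&mapping->i_pages, index))) {
			put_page(page);
			rcu_read_unlock();
			goto repeat;
		}
	} else {
		page = NULL;	/* absent, or a shadow entry */
	}
	rcu_read_unlock();
	return page;
}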
mm_inline.h
9 * page_is_file_lru - should the page be on a file LRU or anon LRU?
10 * @page: the page to test
12 * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
13 * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
14 * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
15 * functions that manipulate the LRU lists, to sort a page onto the right LRU
18 * We would like to get this info without a page flag, but the state
19 * needs to survive until the page is last deleted from the LRU, which
22 static inline int page_is_file_lru(struct page *page) in page_is_file_lru() argument
24 return !PageSwapBacked(page); in page_is_file_lru()
[all …]
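A typical use is picking the LRU list for a page; the sketch below (hypothetical helper, caller assumed to hold the LRU lock) only distinguishes file- from swap-backed, since active/unevictable state is resolved separately by the real page_lru():

#include <linux/mm_inline.h>

static void lru_add_sketch(struct page *page, struct lruvec *lruvec)
{
	enum lru_list lru = page_is_file_lru(page) ?
				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;

	add_page_to_lru_list(page, lruvec, lru);
}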
page_idle.h
6 #include <linux/page-flags.h>
12 static inline bool page_is_young(struct page *page) in page_is_young() argument
14 return PageYoung(page); in page_is_young()
17 static inline void set_page_young(struct page *page) in set_page_young() argument
19 SetPageYoung(page); in set_page_young()
22 static inline bool test_and_clear_page_young(struct page *page) in test_and_clear_page_young() argument
24 return TestClearPageYoung(page); in test_and_clear_page_young()
27 static inline bool page_is_idle(struct page *page) in page_is_idle() argument
29 return PageIdle(page); in page_is_idle()
32 static inline void set_page_idle(struct page *page) in set_page_idle() argument
[all …]
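These flags back the /sys/kernel/mm/page_idle/bitmap interface: an array of native-endian 64-bit words, one bit per page frame. A hedged userspace sketch (hypothetical helper, needs root) that tests the idle bit for a given pfn:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int pfn_is_idle(uint64_t pfn)
{
	uint64_t word;
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &word, sizeof(word), (pfn / 64) * 8) != sizeof(word))
		word = 0;	/* treat short reads as "not idle" */
	close(fd);
	return !!(word & (1ULL << (pfn % 64)));
}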
page_owner.h
11 extern void __reset_page_owner(struct page *page, unsigned int order);
12 extern void __set_page_owner(struct page *page,
14 extern void __split_page_owner(struct page *page, unsigned int nr);
15 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
16 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
17 extern void __dump_page_owner(struct page *page);
21 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
24 __reset_page_owner(page, order); in reset_page_owner()
27 static inline void set_page_owner(struct page *page, in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
[all …]
migrate.h
10 typedef struct page *new_page_t(struct page *page, unsigned long private);
11 typedef void free_page_t(struct page *page, unsigned long private);
17 * - negative errno on page migration failure;
18 * - zero on page migration success;
40 struct page *newpage, struct page *page,
44 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
45 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
46 extern void putback_movable_page(struct page *page);
50 extern void migrate_page_states(struct page *newpage, struct page *page);
51 extern void migrate_page_copy(struct page *newpage, struct page *page);
[all …]
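A sketch of the callback pair those typedefs describe, as a migrate_pages() caller would supply them; these trivial versions ignore the private cookie, and alloc_migration_target() above is the stock implementation most callers use instead:

#include <linux/gfp.h>
#include <linux/migrate.h>

static struct page *alloc_dst_page(struct page *page, unsigned long private)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);	/* a new_page_t */
}

static void free_dst_page(struct page *page, unsigned long private)
{
	__free_page(page);				/* a free_page_t */
}

/* e.g.:
 *	err = migrate_pages(&pagelist, alloc_dst_page, free_dst_page,
 *			    0, MIGRATE_SYNC, MR_SYSCALL);
 */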
highmem.h
15 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma… in flush_anon_page() argument
21 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument
35 extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
47 void *kmap_high(struct page *page);
48 static inline void *kmap(struct page *page) in kmap() argument
53 if (!PageHighMem(page)) in kmap()
54 addr = page_address(page); in kmap()
56 addr = kmap_high(page); in kmap()
61 void kunmap_high(struct page *page);
63 static inline void kunmap(struct page *page) in kunmap() argument
[all …]
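The usual bracket around those calls: kmap() may sleep (kmap_high() can wait for a free slot), so it is for process context only; kmap_atomic() is the non-sleeping variant. A minimal sketch (for this particular job the real helper is clear_highpage()):

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_one_page(struct page *page)
{
	void *addr = kmap(page);	/* direct address if !PageHighMem */

	memset(addr, 0, PAGE_SIZE);
	kunmap(page);			/* no-op for lowmem pages */
}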
hugetlb_cgroup.h
25 * Minimum page order trackable by hugetlb cgroup.
27 * The second tail page (hpage[2]) is the fault usage cgroup.
28 * The third tail page (hpage[3]) is the reservation usage cgroup.
62 __hugetlb_cgroup_from_page(struct page *page, bool rsvd) in __hugetlb_cgroup_from_page() argument
64 VM_BUG_ON_PAGE(!PageHuge(page), page); in __hugetlb_cgroup_from_page()
66 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) in __hugetlb_cgroup_from_page()
69 return (struct hugetlb_cgroup *)page[3].private; in __hugetlb_cgroup_from_page()
71 return (struct hugetlb_cgroup *)page[2].private; in __hugetlb_cgroup_from_page()
74 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) in hugetlb_cgroup_from_page() argument
76 return __hugetlb_cgroup_from_page(page, false); in hugetlb_cgroup_from_page()
[all …]
huge_mm.h
28 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
54 * @pgprot: page protection to use
73 * @pgprot: page protection to use
186 extern void prep_transhuge_page(struct page *page);
187 extern void free_transhuge_page(struct page *page);
188 bool is_transparent_hugepage(struct page *page);
190 bool can_split_huge_page(struct page *page, int *pextra_pins);
191 int split_huge_page_to_list(struct page *page, struct list_head *list);
192 static inline int split_huge_page(struct page *page) in split_huge_page() argument
194 return split_huge_page_to_list(page, NULL); in split_huge_page()
[all …]
mm.h
27 #include <linux/page-flags.h>
100 #include <asm/page.h>
128 * a zero page mapping on a read fault.
131 * related to the physical page in case of virtualization.
144 /* This function must be updated when the size of struct page grows above 80
151 static inline void __mm_zero_struct_page(struct page *page) in __mm_zero_struct_page() argument
153 unsigned long *_pp = (void *)page; in __mm_zero_struct_page()
155 /* Check that struct page is either 56, 64, 72, or 80 bytes */ in __mm_zero_struct_page()
156 BUILD_BUG_ON(sizeof(struct page) & 7); in __mm_zero_struct_page()
157 BUILD_BUG_ON(sizeof(struct page) < 56); in __mm_zero_struct_page()
[all …]
/linux-5.10/mm/
swap.c
45 /* How many pages do we try to swap or page in/out together? */
79 static void __page_cache_release(struct page *page) in __page_cache_release() argument
81 if (PageLRU(page)) { in __page_cache_release()
82 pg_data_t *pgdat = page_pgdat(page); in __page_cache_release()
87 lruvec = mem_cgroup_page_lruvec(page, pgdat); in __page_cache_release()
88 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
89 __ClearPageLRU(page); in __page_cache_release()
90 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
93 __ClearPageWaiters(page); in __page_cache_release()
96 static void __put_single_page(struct page *page) in __put_single_page() argument
[all …]
migrate.c
7 * Page migration was first developed in the context of the memory hotplug
86 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
94 * In case we 'win' a race for a movable page being freed under us and in isolate_movable_page()
97 * release this page, thus avoiding a nasty leakage. in isolate_movable_page()
99 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
103 * Check PageMovable before holding a PG_lock because page's owner in isolate_movable_page()
104 * assumes anybody doesn't touch PG_lock of newly allocated page in isolate_movable_page()
105 * so unconditionally grabbing the lock ruins page's owner side. in isolate_movable_page()
107 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
111 * compaction threads can race against page migration functions in isolate_movable_page()
[all …]
filemap.c
63 * finished 'unifying' the page and buffer cache and SMP-threaded the
64 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
121 struct page *page, void *shadow) in page_cache_delete() argument
123 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
129 if (!PageHuge(page)) { in page_cache_delete()
130 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
131 nr = compound_nr(page); in page_cache_delete()
134 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
135 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
136 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
[all …]
page_io.c
10 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
30 struct page *page, bio_end_io_t end_io) in get_swap_bio() argument
38 bio->bi_iter.bi_sector = map_swap_page(page, &bdev); in get_swap_bio()
43 bio_add_page(bio, page, thp_size(page), 0); in get_swap_bio()
50 struct page *page = bio_first_page_all(bio); in end_swap_bio_write() local
53 SetPageError(page); in end_swap_bio_write()
55 * We failed to write the page out to swap-space. in end_swap_bio_write()
56 * Re-dirty the page in order to avoid it being reclaimed. in end_swap_bio_write()
62 set_page_dirty(page); in end_swap_bio_write()
66 ClearPageReclaim(page); in end_swap_bio_write()
[all …]
balloon_compaction.c
15 struct page *page) in balloon_page_enqueue_one() argument
18 * Block others from accessing the 'page' when we get around to in balloon_page_enqueue_one()
20 * holding a reference to the 'page' at this point. If we are not, then in balloon_page_enqueue_one()
23 BUG_ON(!trylock_page(page)); in balloon_page_enqueue_one()
24 balloon_page_insert(b_dev_info, page); in balloon_page_enqueue_one()
25 unlock_page(page); in balloon_page_enqueue_one()
30 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
32 * @b_dev_info: balloon device descriptor where we will insert a new page to
43 struct page *page, *tmp; in balloon_page_list_enqueue() local
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
[all …]
rmap.c
10 * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * page->flags PG_locked (lock_page)
272 * searches where page is mapped.
455 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
459 * have been relevant to this page.
461 * The page might have been remapped to a different anon_vma or the anon_vma
466 * ensure that any anon_vma obtained from the page will still be valid for as
[all …]
truncate.c
30 * Regular page slots are stabilized by the page lock even without the tree
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries() local
86 if (!xa_is_value(page)) { in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
138 * do_invalidatepage - invalidate part or all of a page
139 * @page: the page which is affected
143 * do_invalidatepage() is called when all or part of the page has become
152 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
155 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
[all …]
/linux-5.10/Documentation/vm/
page_migration.rst
4 Page migration
7 Page migration allows moving the physical location of pages between
15 The main intent of page migration is to reduce the latency of memory accesses
19 Page migration allows a process to manually relocate the node on which its
25 Page migration functions are provided by the numactl package by Andi Kleen
28 which provides an interface similar to other NUMA functionality for page
31 proc(5) man page.
37 manual page migration support. Automatic page migration may be implemented
54 Page migration allows the preservation of the relative location of pages
60 Page migration occurs in several steps. First a high level
[all …]
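From userspace, the interfaces this document describes are most easily reached through libnuma. A hedged sketch using move_pages(2) to migrate one page of the caller's own address space to node 1 (build with -lnuma; the destination node number is an assumption about the machine):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pages[1];
	int nodes[1] = { 1 };	/* destination node, assumed to exist */
	int status[1];

	pages[0] = aligned_alloc(4096, 4096);
	((volatile char *)pages[0])[0] = 1;	/* fault the page in first */

	if (move_pages(0 /* self */, 1, pages, nodes, status,
		       MPOL_MF_MOVE) != 0)
		perror("move_pages");
	else
		printf("page now on node %d\n", status[0]);
	return 0;
}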
/linux-5.10/fs/jfs/
jfs_metapage.c
25 uint pagealloc; /* # of page allocations */
26 uint pagefree; /* # of page frees */
48 unlock_page(mp->page); in __lock_metapage()
50 lock_page(mp->page); in __lock_metapage()
58 * Must have mp->page locked
79 #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) argument
81 static inline struct metapage *page_to_mp(struct page *page, int offset) in page_to_mp() argument
83 if (!PagePrivate(page)) in page_to_mp()
85 return mp_anchor(page)->mp[offset >> L2PSIZE]; in page_to_mp()
88 static inline int insert_metapage(struct page *page, struct metapage *mp) in insert_metapage() argument
[all …]
/linux-5.10/fs/sysv/
dir.c
31 static inline void dir_put_page(struct page *page) in dir_put_page() argument
33 kunmap(page); in dir_put_page()
34 put_page(page); in dir_put_page()
37 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) in dir_commit_chunk() argument
39 struct address_space *mapping = page->mapping; in dir_commit_chunk()
43 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk()
49 err = write_one_page(page); in dir_commit_chunk()
51 unlock_page(page); in dir_commit_chunk()
55 static struct page * dir_get_page(struct inode *dir, unsigned long n) in dir_get_page()
58 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() local
[all …]
/linux-5.10/sound/pci/trident/
trident_memory.c
7 * Trident 4DWave-NX memory page allocation (TLB area)
19 /* page arguments of these two macros are Trident page (4096 bytes), not like
22 #define __set_tlb_bus(trident,page,ptr,addr) \ argument
23 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
24 (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
25 #define __tlb_to_ptr(trident,page) \ argument
26 (void*)((trident)->tlb.shadow_entries[page])
27 #define __tlb_to_addr(trident,page) \ argument
28 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
31 /* page size == SNDRV_TRIDENT_PAGE_SIZE */
[all …]
/linux-5.10/fs/9p/
vfs_addr.c
32 * v9fs_fid_readpage - read an entire page in from 9P
35 * @page: page structure to read the data into
38 static int v9fs_fid_readpage(void *data, struct page *page) in v9fs_fid_readpage() argument
41 struct inode *inode = page->mapping->host; in v9fs_fid_readpage()
42 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
48 BUG_ON(!PageLocked(page)); in v9fs_fid_readpage()
50 retval = v9fs_readpage_from_fscache(inode, page); in v9fs_fid_readpage()
56 retval = p9_client_read(fid, page_offset(page), &to, &err); in v9fs_fid_readpage()
58 v9fs_uncache_page(inode, page); in v9fs_fid_readpage()
63 zero_user(page, retval, PAGE_SIZE - retval); in v9fs_fid_readpage()
[all …]
/linux-5.10/net/core/
page_pool.c
16 #include <linux/page-flags.h>
43 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, in page_pool_init()
53 /* In order to request DMA-sync-for-device the page in page_pool_init()
101 static void page_pool_return_page(struct page_pool *pool, struct page *page);
104 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
107 struct page *page; in page_pool_refill_alloc_cache() local
129 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
130 if (unlikely(!page)) in page_pool_refill_alloc_cache()
133 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
134 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
[all …]
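Driver-side usage matching those comments: create a pool that DMA-maps its pages toward the device and draw RX buffers from it. A sketch under the 5.10 API (fields and flags as in include/net/page_pool.h; error handling trimmed):

#include <net/page_pool.h>

static struct page_pool *rx_pool_create(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order		= 0,		/* single pages */
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,	/* RX only */
		.flags		= PP_FLAG_DMA_MAP,	/* pool does dma_map_page() */
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

/* RX path: struct page *page = page_pool_dev_alloc_pages(pool); */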
