
Searched refs:page_address (Results 1 – 25 of 464) sorted by relevance


/linux/drivers/gpu/drm/v3d/
v3d_mmu.c
32 static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment) in v3d_mmu_is_aligned() argument
35 IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT); in v3d_mmu_is_aligned()
98 u32 page_address = page_prot | pfn; in v3d_mmu_insert_ptes() local
104 v3d_mmu_is_aligned(page, page_address, SZ_1M)) { in v3d_mmu_insert_ptes()
106 page_address |= V3D_PTE_SUPERPAGE; in v3d_mmu_insert_ptes()
108 v3d_mmu_is_aligned(page, page_address, SZ_64K)) { in v3d_mmu_insert_ptes()
110 page_address |= V3D_PTE_BIGPAGE; in v3d_mmu_insert_ptes()
116 v3d->pt[page++] = page_address + i; in v3d_mmu_insert_ptes()
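Note: in these v3d matches, page_address is a local u32 holding the PTE value (page_prot | pfn), not the page_address() function. The driver promotes a mapping to a 64K big page or 1M super page only when both the page-table index and the PTE address bits are aligned. A minimal sketch of that check, with V3D_MMU_PAGE_SHIFT assumed from the driver headers:

    /* Sketch, not verbatim driver source: both the index into the page
     * table and the PTE address bits must be aligned to the huge-page
     * size (scaled down by the 4K MMU page shift) before SUPERPAGE or
     * BIGPAGE may be ORed into the PTE. */
    #include <linux/align.h>
    #include <linux/types.h>

    #define V3D_MMU_PAGE_SHIFT 12          /* assumed from v3d headers */

    static bool mmu_aligned(u32 page, u32 page_address, size_t alignment)
    {
            return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) &&
                   IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT);
    }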
/linux/crypto/async_tx/
raid6test.c
40 get_random_bytes(page_address(data[i]), PAGE_SIZE); in makedata()
131 memset(page_address(recovi), 0xf0, PAGE_SIZE); in test_disks()
132 memset(page_address(recovj), 0xba, PAGE_SIZE); in test_disks()
139 erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); in test_disks()
140 errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); in test_disks()
167 memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); in test()
168 memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); in test()
async_raid6_recov.c
73 a = page_address(srcs[0]) + src_offs[0]; in async_sum_product()
74 b = page_address(srcs[1]) + src_offs[1]; in async_sum_product()
75 c = page_address(dest) + d_off; in async_sum_product()
143 d = page_address(dest) + d_off; in async_mult()
144 s = page_address(src) + s_off; in async_mult()
419 ptrs[i] = page_address(blocks[i]) + offs[i]; in async_raid6_2data_recov()
502 ptrs[i] = page_address(blocks[i]) + offs[i]; in async_raid6_datap_recov()
async_pq.c
124 srcs[i] = page_address(blocks[i]) + offsets[i]; in do_sync_gen_syndrome()
406 p = page_address(p_src) + p_off; in async_syndrome_val()
407 s = page_address(spare) + s_off; in async_syndrome_val()
419 q = page_address(q_src) + q_off; in async_syndrome_val()
420 s = page_address(spare) + s_off; in async_syndrome_val()
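All three async_tx files follow the same fallback pattern: when no DMA channel is available, each (page, offset) source is flattened to a CPU virtual address with page_address() so the synchronous raid6/xor code can run on it. A minimal sketch of the pattern, using a hypothetical xor helper (the real code dispatches into the raid6 recovery tables):

    /* Sketch of the synchronous fallback: page_address() plus a byte
     * offset yields a plain pointer the CPU path can use. Assumes the
     * pages are addressable without kmap (lowmem), as in async_tx. */
    #include <linux/mm.h>

    static void sync_xor(struct page *dest, unsigned int d_off,
                         struct page *src, unsigned int s_off, size_t len)
    {
            u8 *d = page_address(dest) + d_off;
            const u8 *s = page_address(src) + s_off;

            while (len--)
                    *d++ ^= *s++;
    }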
/linux/include/linux/
highmem-internal.h
46 addr = page_address(page); in kmap()
79 return page_address(page); in kmap_local_page_try_from_panic()
173 return page_address(page); in kmap()
182 kunmap_flush_on_unmap(page_address(page)); in kunmap()
188 return page_address(page); in kmap_local_page()
193 return page_address(page); in kmap_local_page_try_from_panic()
225 return page_address(page); in kmap_atomic()
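These matches are the !CONFIG_HIGHMEM stubs: when every page lives in the linear map, the whole kmap family degenerates to page_address(). A simplified sketch of their shape (guards and flush details from the real header omitted):

    /* Simplified, not the verbatim header: with no highmem there is
     * nothing to map, so kmap() and kmap_local_page() just return the
     * page's permanent linear-map address. */
    #include <linux/kernel.h>
    #include <linux/mm.h>

    static inline void *kmap_sketch(struct page *page)
    {
            might_sleep();                  /* kmap() may sleep */
            return page_address(page);
    }

    static inline void *kmap_local_sketch(struct page *page)
    {
            return page_address(page);      /* no mapping work needed */
    }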
/linux/arch/arm64/mm/
copypage.c
19 void *kto = page_address(to); in copy_highpage()
20 void *kfrom = page_address(from); in copy_highpage()
48 kfrom = page_address(folio_page(src, i)); in copy_highpage()
49 kto = page_address(folio_page(dst, i)); in copy_highpage()
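The arm64 copy_highpage() can use page_address() on both pages because arm64 has no highmem; after copying the data it must also copy MTE tags when the source page is tagged. A hedged sketch (the folio/contiguous-page handling visible in the later matches is omitted):

    /* Sketch of the arm64 pattern: data first, then MTE tags if the
     * source carries them. system_supports_mte(), page_mte_tagged()
     * and mte_copy_page_tags() are real arm64 helpers; dst-side tag
     * bookkeeping is simplified away. */
    #include <linux/mm.h>
    #include <asm/cpufeature.h>
    #include <asm/mte.h>

    static void copy_highpage_sketch(struct page *to, struct page *from)
    {
            void *kto = page_address(to);
            void *kfrom = page_address(from);

            copy_page(kto, kfrom);
            if (system_supports_mte() && page_mte_tagged(from))
                    mte_copy_page_tags(kto, kfrom);
    }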
pageattr.c
188 __change_memory_common((u64)page_address(area->pages[i]), in change_memory_common()
250 return update_range_prot((unsigned long)page_address(page), in set_direct_map_invalid_noflush()
262 return update_range_prot((unsigned long)page_address(page), in set_direct_map_default_noflush()
348 unsigned long addr = (unsigned long)page_address(page); in set_direct_map_valid_noflush()
368 set_memory_valid((unsigned long)page_address(page), numpages, enable); in __kernel_map_pages()
387 unsigned long addr = (unsigned long)page_address(page); in kernel_page_present()
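The pageattr matches all share one idiom: the direct-map permission helpers take a linear-map virtual address, so page_address() is cast to unsigned long before being handed to set_memory_valid() and friends. A sketch of the mapping toggle seen at line 368:

    /* Sketch: set_memory_valid() is the real arm64 API
     * (address, number of pages, enable). */
    #include <linux/mm.h>
    #include <asm/set_memory.h>

    static void kernel_map_pages_sketch(struct page *page, int numpages,
                                        int enable)
    {
            set_memory_valid((unsigned long)page_address(page),
                             numpages, enable);
    }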
mteswap.c
34 mte_save_page_tags(page_address(page), tag_storage); in mte_save_tags()
58 mte_restore_page_tags(page_address(page), tags); in mte_restore_tags()
/linux/mm/kmsan/
shadow.c
28 return page_address(shadow_page_for(page)); in shadow_ptr_for()
33 return page_address(origin_page_for(page)); in origin_ptr_for()
158 kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE, in kmsan_copy_page_meta()
184 __memset(page_address(shadow), 0, PAGE_SIZE * pages); in kmsan_alloc_page()
185 __memset(page_address(origin), 0, PAGE_SIZE * pages); in kmsan_alloc_page()
193 __memset(page_address(shadow), -1, PAGE_SIZE * pages); in kmsan_alloc_page()
202 ((depot_stack_handle_t *)page_address(origin))[i] = handle; in kmsan_alloc_page()
210 kmsan_internal_poison_memory(page_address(page), page_size(page), in kmsan_free_page()
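KMSAN pairs every data page with a shadow page (validity bits) and an origin page (allocation stack handles); page_address() of the companion page yields the metadata buffer. A sketch of the allocation-time initialization seen in kmsan_alloc_page(), with shadow_page_for()/origin_page_for() and the non-instrumented __memset treated as kmsan-internal assumptions (origin stack-handle writes for the poisoned case omitted):

    /* Sketch: an all-zero shadow means "initialized"; a shadow of all
     * ones (-1) poisons the whole range so reads are reported. */
    #include <linux/mm.h>

    static void init_meta_sketch(struct page *shadow, struct page *origin,
                                 int pages, bool poison)
    {
            __memset(page_address(shadow), poison ? -1 : 0,
                     PAGE_SIZE * pages);
            if (!poison)
                    __memset(page_address(origin), 0, PAGE_SIZE * pages);
    }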
/linux/arch/riscv/mm/
pageattr.c
122 ptep_new = (pte_t *)page_address(pte_page); in __split_linear_mapping_pmd()
162 pmdp_new = (pmd_t *)page_address(pmd_page); in __split_linear_mapping_pud()
215 pudp_new = (pud_t *)page_address(pud_page); in __split_linear_mapping_p4d()
295 lm_start = (unsigned long)page_address(area->pages[i]); in __set_memory()
379 return __set_memory((unsigned long)page_address(page), 1, in set_direct_map_invalid_noflush()
385 return __set_memory((unsigned long)page_address(page), 1, in set_direct_map_default_noflush()
401 return __set_memory((unsigned long)page_address(page), nr, set, clear); in set_direct_map_valid_noflush()
426 unsigned long start = (unsigned long)page_address(page); in __kernel_map_pages()
437 unsigned long addr = (unsigned long)page_address(page); in kernel_page_present()
/linux/security/selinux/
status.c
52 status = page_address(selinux_state.status_page); in selinux_kernel_status_page()
85 status = page_address(selinux_state.status_page); in selinux_status_update_setenforce()
110 status = page_address(selinux_state.status_page); in selinux_status_update_policyload()
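The SELinux status page is a single page shared read-only with userspace via mmap of /sys/fs/selinux/status; page_address() gives the kernel-side mapping, which is updated under a sequence counter so readers can detect torn updates. A hedged sketch of the publish step, with the struct and field names assumed from the selinux_kernel_status layout:

    /* Sketch of the seqcount-style publish: odd sequence = update in
     * progress, even = stable. Treat field names as assumptions. */
    #include <linux/mm.h>

    static void publish_enforcing(struct page *status_page, u32 enforcing)
    {
            struct selinux_kernel_status *status = page_address(status_page);

            status->sequence++;             /* now odd: readers retry */
            smp_wmb();
            status->enforcing = enforcing;
            smp_wmb();
            status->sequence++;             /* even again: stable */
    }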
/linux/arch/x86/kernel/
machine_kexec_32.c
105 control_page = page_address(image->control_code_page); in machine_kexec_prepare_page_tables()
139 set_memory_x((unsigned long)page_address(image->control_code_page), 1); in machine_kexec_prepare()
153 set_memory_nx((unsigned long)page_address(image->control_code_page), 1); in machine_kexec_cleanup()
192 control_page = page_address(image->control_code_page); in machine_kexec()
espfix_64.c
167 pmd_p = (pmd_t *)page_address(page); in init_espfix_ap()
179 pte_p = (pte_t *)page_address(page); in init_espfix_ap()
187 stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); in init_espfix_ap()
irq_32.c
124 per_cpu(hardirq_stack_ptr, cpu) = page_address(ph); in irq_init_percpu_irqstack()
125 per_cpu(softirq_stack_ptr, cpu) = page_address(ps); in irq_init_percpu_irqstack()
/linux/net/ceph/
cls_lock_client.c
56 p = page_address(lock_op_page); in ceph_cls_lock()
115 p = page_address(unlock_op_page); in ceph_cls_unlock()
168 p = page_address(break_op_page); in ceph_cls_break_lock()
217 p = page_address(cookie_op_page); in ceph_cls_set_cookie()
362 p = page_address(get_info_op_page); in ceph_cls_lock_info()
377 p = page_address(reply_page); in ceph_cls_lock_info()
415 p = page_address(pages[0]); in ceph_cls_assert_locked()
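The ceph client allocates a page per RADOS class operation and serializes the request payload at that page's virtual address with the ceph_encode_* helpers. A minimal sketch of the encode step (bounds handling simplified):

    /* Sketch: p walks forward through the op page as fields are
     * encoded; end bounds the buffer. ceph_encode_string() is a real
     * helper from include/linux/ceph/decode.h. */
    #include <linux/ceph/decode.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static void encode_lock_name(struct page *op_page, const char *lock_name)
    {
            void *p = page_address(op_page);
            void *end = p + PAGE_SIZE;

            ceph_encode_string(&p, end, lock_name, strlen(lock_name));
    }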
/linux/mm/
highmem.c
282 if (page_address(page)) in map_new_virtual()
283 return (unsigned long)page_address(page); in map_new_virtual()
316 vaddr = (unsigned long)page_address(page); in kmap_high()
342 vaddr = (unsigned long)page_address(page); in kmap_high_get()
369 vaddr = (unsigned long)page_address(page); in kunmap_high()
585 return page_address(page); in __kmap_local_page_prot()
753 void *page_address(const struct page *page) in page_address() function
779 EXPORT_SYMBOL(page_address);
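Line 753 is the out-of-line definition itself, compiled when CONFIG_HIGHMEM is set: lowmem pages resolve directly through the linear map, while highmem pages require a lookup in a hash of currently kmapped pages. A simplified sketch, with the hash walk abbreviated to a hypothetical helper:

    /* Simplified shape of page_address() under CONFIG_HIGHMEM. The
     * lowmem fast path is real; find_in_pas_hash() stands in for the
     * page_address_htable walk and is a hypothetical name. */
    #include <linux/mm.h>
    #include <linux/page-flags.h>

    static void *find_in_pas_hash(const struct page *page); /* hypothetical */

    void *page_address_sketch(const struct page *page)
    {
            if (!PageHighMem(page))
                    return lowmem_page_address(page);
            return find_in_pas_hash(page);  /* NULL if not kmapped */
    }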
/linux/arch/loongarch/mm/
pageattr.c
168 unsigned long addr = (unsigned long)page_address(page); in kernel_page_present()
203 unsigned long addr = (unsigned long)page_address(page); in set_direct_map_default_noflush()
213 unsigned long addr = (unsigned long)page_address(page); in set_direct_map_invalid_noflush()
223 unsigned long addr = (unsigned long)page_address(page); in set_direct_map_valid_noflush()
/linux/drivers/mtd/devices/
block2mtd.c
70 max = page_address(page) + PAGE_SIZE; in _block2mtd_erase()
71 for (p=page_address(page); p<max; p++) in _block2mtd_erase()
74 memset(page_address(page), 0xff, PAGE_SIZE); in _block2mtd_erase()
125 memcpy(buf, page_address(page) + offset, cpylen); in block2mtd_read()
159 if (memcmp(page_address(page)+offset, buf, cpylen)) { in _block2mtd_write()
161 memcpy(page_address(page) + offset, buf, cpylen); in _block2mtd_write()
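block2mtd emulates an MTD device on top of a block device's page cache, so every read, write, and erase goes through page_address() of the looked-up page. A sketch of the erase scan from the matches above:

    /* Sketch: scan the page for any word that is not all ones; only
     * then rewrite the page to 0xff and mark it dirty for writeback. */
    #include <linux/mm.h>
    #include <linux/string.h>

    static void erase_page_sketch(struct page *page)
    {
            u32 *p = page_address(page);
            u32 *max = page_address(page) + PAGE_SIZE;

            for (; p < max; p++) {
                    if (*p != ~0U) {
                            memset(page_address(page), 0xff, PAGE_SIZE);
                            set_page_dirty(page);
                            break;
                    }
            }
    }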
/linux/mm/kasan/
common.c
139 kasan_unpoison(set_tag(page_address(page), tag), in __kasan_unpoison_pages()
150 kasan_poison(page_address(page), PAGE_SIZE << order, in __kasan_poison_pages()
161 kasan_poison(page_address(page), page_size(page), in __kasan_poison_slab()
311 if (ptr != page_address(virt_to_head_page(ptr))) { in check_page_allocation()
505 ptr = page_address(page); in __kasan_mempool_poison_pages()
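KASAN's page hooks poison or unpoison the whole (possibly compound) allocation through page_address(); PAGE_SIZE << order covers every subpage. A hedged sketch of the free-side hook, with kasan_poison() and KASAN_PAGE_FREE treated as kasan-internal assumptions:

    /* Sketch: on free, the entire range is marked inaccessible so any
     * later access is reported. The init flag is simplified to false. */
    #include <linux/mm.h>

    static void poison_freed_pages(struct page *page, unsigned int order)
    {
            kasan_poison(page_address(page), PAGE_SIZE << order,
                         KASAN_PAGE_FREE, false);
    }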
/linux/arch/arm64/kernel/
mte.c
50 mte_clear_page_tags(page_address(page)); in mte_sync_tags()
63 mte_clear_page_tags(page_address(page)); in mte_sync_tags()
77 addr1 = page_address(page1); in memcmp_pages()
78 addr2 = page_address(page2); in memcmp_pages()
488 maddr = page_address(page); in __access_remote_tags()
/linux/arch/arm/mm/
copypage-v6.c
80 discard_old_kernel_data(page_address(to)); in v6_copy_user_highpage_aliasing()
109 discard_old_kernel_data(page_address(page)); in v6_clear_user_highpage_aliasing()
/linux/arch/powerpc/mm/
dma-noncoherent.c
102 unsigned long start = (unsigned long)page_address(page) + offset; in __dma_sync_page()
121 unsigned long kaddr = (unsigned long)page_address(page); in arch_dma_prep_coherent()
/linux/arch/riscv/kernel/
machine_kexec.c
63 control_code_buffer = page_address(image->control_code_page); in machine_kexec_prepare()
157 void *control_code_buffer = page_address(image->control_code_page); in machine_kexec()
/linux/kernel/power/
snapshot.c
61 static inline int __must_check hibernate_restore_protect_page(void *page_address) in hibernate_restore_protect_page() argument
64 return set_memory_ro((unsigned long)page_address, 1); in hibernate_restore_protect_page()
68 static inline int hibernate_restore_unprotect_page(void *page_address) in hibernate_restore_unprotect_page() argument
71 return set_memory_rw((unsigned long)page_address, 1); in hibernate_restore_unprotect_page()
77 static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; } in hibernate_restore_protect_page() argument
78 static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; } in hibernate_restore_unprotect_page() argument
103 unsigned long addr = (unsigned long)page_address(page); in hibernate_unmap_page()
239 static void recycle_safe_page(void *page_address) in recycle_safe_page() argument
241 struct linked_page *lp = page_address; in recycle_safe_page()
1457 zeros_only = do_copy_page(dst, page_address(s_page)); in safe_copy_page()
[all …]
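Note: the snapshot.c hits at lines 61-78 and 239 match a parameter named page_address, not a call to the page_address() function; inside those functions the parameter shadows the global symbol, which is why an identifier search lists declarations ("argument") alongside calls. A minimal illustration of the same shadowing (not kernel source):

    /* Illustration only: within this function, page_address is the
     * void * parameter, and the kernel's page_address() function is
     * shadowed and unreachable under that name. */
    #include <linux/set_memory.h>

    static int protect_page_sketch(void *page_address)
    {
            return set_memory_ro((unsigned long)page_address, 1);
    }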
/linux/kernel/
kexec_core.c
293 arch_kexec_post_alloc_pages(page_address(pages), count, in kimage_alloc_pages()
311 arch_kexec_pre_free_pages(page_address(page), count); in kimage_free_pages()
456 arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0); in kimage_alloc_crash_control_pages()
495 ind_page = page_address(page); in kimage_add_entry()
567 arch_kexec_pre_free_pages(page_address(cma), nr_pages); in kimage_free_cma()
744 char *ptr = page_address(cma); in kimage_load_cma_segment()
905 arch_kexec_post_alloc_pages(page_address(page), 1, 0); in kimage_load_crash_segment()
930 arch_kexec_pre_free_pages(page_address(page), 1); in kimage_load_crash_segment()
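kexec hands every control and segment page to the architecture by virtual address so it can be mapped or otherwise prepared (and undone before free); page_address() is the bridge from struct page to that address. A sketch of the allocate-then-notify pattern, with error handling from the real kimage_alloc_pages() omitted:

    /* Sketch: allocate, then let the arch hook post-process the fresh
     * pages by virtual address. The hook names are the real ones from
     * the matches above. */
    #include <linux/gfp.h>
    #include <linux/kexec.h>
    #include <linux/mm.h>

    static struct page *alloc_control_sketch(gfp_t gfp_mask,
                                             unsigned int order)
    {
            struct page *pages = alloc_pages(gfp_mask, order);

            if (pages)
                    arch_kexec_post_alloc_pages(page_address(pages),
                                                1 << order, gfp_mask);
            return pages;
    }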
