Searched refs: PAGE_ALIGN_DOWN (Results 1 – 25 of 25), sorted by relevance
158 void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in hva_to_pfn_retry()
280 old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in __kvm_gpc_refresh()
281 old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); in __kvm_gpc_refresh()
288 gpc->uhva = PAGE_ALIGN_DOWN(uhva); in __kvm_gpc_refresh()
97 mstart = PAGE_ALIGN_DOWN(reset_vector); in acpi_mp_setup_reset()
112 mstart = PAGE_ALIGN_DOWN(__pa(asm_acpi_mp_play_dead)); in acpi_mp_setup_reset()
15 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) macro
51 start = PAGE_ALIGN_DOWN(start); in raw_copy_from_user()
85 phys = PAGE_ALIGN_DOWN(phys); in mmio_guard_ioremap_hook()
256 u64 aligned_iova = PAGE_ALIGN_DOWN(base_iova); in iommufd_hw_queue_destroy_access()
293 u64 aligned_iova = PAGE_ALIGN_DOWN(cmd->nesting_parent_iova); in iommufd_hw_queue_alloc_phys()
254 split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); in truncate_inode_partial_folio()
267 PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); in truncate_inode_partial_folio()
2058 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_writeable()
2128 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_safe_writeable()
2160 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_readable()
443 return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); in mmap_base()
2134 pgend = PAGE_ALIGN_DOWN(__pa(end_pg)); in free_memmap()
2689 addr = PAGE_ALIGN_DOWN(addr); in make_device_exclusive()
6787 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); in kvfree_rcu_cb()
6798 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); in kvfree_rcu_cb()
282 start = (void *)PAGE_ALIGN_DOWN((u64)start); in kmsan_init_alloc_meta_for_range()
294 aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1); in pci_dma_range_setup()
432 shadow_start = PAGE_ALIGN_DOWN(shadow_start); in kasan_populate_vmalloc()
123 return (struct bpf_stream_page *)PAGE_ALIGN_DOWN(addr); in bpf_stream_page_from_elem()
672 nr_pages = (PAGE_ALIGN(end_paddr) - PAGE_ALIGN_DOWN(start_paddr))/PAGE_SIZE; in kexec_mark_dm_crypt_keys()
89 unsigned long sha_start = PAGE_ALIGN_DOWN(__sha(start)); in kasan_populate()
222 range.pos = PAGE_ALIGN_DOWN(*ppos); in fsnotify_pre_content()
643 __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); in flush_cache_page_if_present()
639 swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr); in xen_e820_swap_entry_with_ram()
693 start = PAGE_ALIGN_DOWN(efi.unaccepted); in reserve_unaccepted()
225 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) macro
672 npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >> in vmalloc_to_dma_addrs()
10050 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; in btrfs_add_swap_extent()
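
For reference, PAGE_ALIGN_DOWN(addr) expands to ALIGN_DOWN(addr, PAGE_SIZE): it rounds addr down to the start of the page containing it (clearing the low page-offset bits), whereas PAGE_ALIGN rounds up to the next page boundary. Below is a minimal standalone sketch of that rounding behaviour; the 4 KiB PAGE_SIZE and the simplified mask-based ALIGN_DOWN are assumptions for illustration only, not copied from any particular kernel configuration.

    /* Standalone sketch; PAGE_SIZE of 4096 is assumed for illustration. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE             4096UL
    /* Mask form; valid because PAGE_SIZE is a power of two. */
    #define ALIGN_DOWN(x, a)      ((x) & ~((a) - 1))
    #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
    #define PAGE_ALIGN(addr)      ALIGN_DOWN((addr) + PAGE_SIZE - 1, PAGE_SIZE)

    int main(void)
    {
            uintptr_t addr = 0x12345;

            /* 0x12345 sits in the page [0x12000, 0x13000). */
            printf("down: %#lx\n", (unsigned long)PAGE_ALIGN_DOWN(addr)); /* 0x12000 */
            printf("up:   %#lx\n", (unsigned long)PAGE_ALIGN(addr));      /* 0x13000 */
            return 0;
    }

This is why the call sites above use it to find the page containing a pointer or offset (e.g. the old_khva and truncate_inode_partial_folio() cases), and pair it with PAGE_ALIGN of the end address when computing a page count that covers an unaligned range.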