| /linux/arch/arm64/kernel/ |
| paravirt.c |
  38   struct pvclock_vcpu_stolen_time __rcu *kaddr;  [member]
  55   struct pvclock_vcpu_stolen_time *kaddr = NULL;  [in para_steal_clock(), local]
  67   kaddr = rcu_dereference(reg->kaddr);  [in para_steal_clock()]
  68   if (!kaddr) {  [in para_steal_clock()]
  73   ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));  [in para_steal_clock()]
  80   struct pvclock_vcpu_stolen_time *kaddr = NULL;  [in stolen_time_cpu_down_prepare(), local]
  84   if (!reg->kaddr)  [in stolen_time_cpu_down_prepare()]
  87   kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);  [in stolen_time_cpu_down_prepare()]
  89   memunmap(kaddr);  [in stolen_time_cpu_down_prepare()]
  96   struct pvclock_vcpu_stolen_time *kaddr = NULL;  [in stolen_time_cpu_online(), local]
  [all …]
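
The paravirt.c hits show both halves of an RCU-managed mapping: para_steal_clock() dereferences the per-CPU reg->kaddr under RCU and samples the little-endian counter with READ_ONCE(), while the CPU-down path unpublishes the pointer with rcu_replace_pointer() before memunmap()ing it. A minimal sketch of the reader side, with the surrounding structure simplified and the ABI struct reduced to the one field the reader samples:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct pvclock_vcpu_stolen_time {   /* stand-in: field layout reduced for illustration */
            __le64 stolen_time;
    };

    struct stolen_time_region {
            struct pvclock_vcpu_stolen_time __rcu *kaddr;
    };

    static u64 read_stolen_time(struct stolen_time_region *reg)
    {
            struct pvclock_vcpu_stolen_time *kaddr;
            u64 ret = 0;

            rcu_read_lock();
            kaddr = rcu_dereference(reg->kaddr);
            if (kaddr)      /* NULL once the CPU-down path has unpublished it */
                    ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
            rcu_read_unlock();
            return ret;
    }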
|
| /linux/fs/nilfs2/ |
| dir.c |
  109  static bool nilfs_check_folio(struct folio *folio, char *kaddr)  [in nilfs_check_folio(), argument]
  127  p = (struct nilfs_dir_entry *)(kaddr + offs);  [in nilfs_check_folio()]
  177  p = (struct nilfs_dir_entry *)(kaddr + offs);  [in nilfs_check_folio()]
  191  void *kaddr;  [in nilfs_get_folio(), local]
  196  kaddr = kmap_local_folio(folio, 0);  [in nilfs_get_folio()]
  198  if (!nilfs_check_folio(folio, kaddr))  [in nilfs_get_folio()]
  203  return kaddr;  [in nilfs_get_folio()]
  206  folio_release_kmap(folio, kaddr);  [in nilfs_get_folio()]
  247  char *kaddr, *limit;  [in nilfs_readdir(), local]
  251  kaddr = nilfs_get_folio(inode, n, &folio);  [in nilfs_readdir()]
  [all …]
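
The nilfs_get_folio() lines above, and the near-identical ext2, ufs, and minix entries below, all follow one mapping discipline: kmap_local_folio() produces a temporary kernel address for the directory folio, the folio is validated through that address, and folio_release_kmap() undoes both the mapping and the folio reference on the failure path. A sketch of that shape, with dir_folio_ok() as a hypothetical stand-in for the various *_check_folio() helpers:

    #include <linux/err.h>
    #include <linux/highmem.h>
    #include <linux/pagemap.h>

    bool dir_folio_ok(struct folio *folio, void *kaddr);    /* hypothetical validator */

    static void *dir_get_folio_sketch(struct folio *folio)
    {
            void *kaddr = kmap_local_folio(folio, 0);

            if (!dir_folio_ok(folio, kaddr)) {
                    folio_release_kmap(folio, kaddr);   /* kunmap_local() + folio_put() */
                    return ERR_PTR(-EIO);
            }
            return kaddr;   /* caller releases with folio_release_kmap() */
    }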
|
| /linux/fs/ext2/ |
| dir.c |
  99   static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr)  [in ext2_check_folio(), argument]
  118  p = (ext2_dirent *)(kaddr + offs);  [in ext2_check_folio()]
  170  p = (ext2_dirent *)(kaddr + offs);  [in ext2_check_folio()]
  194  void *kaddr;  [in ext2_get_folio(), local]
  198  kaddr = kmap_local_folio(folio, 0);  [in ext2_get_folio()]
  200  if (!ext2_check_folio(folio, quiet, kaddr))  [in ext2_get_folio()]
  204  return kaddr;  [in ext2_get_folio()]
  207  folio_release_kmap(folio, kaddr);  [in ext2_get_folio()]
  278  char *kaddr = ext2_get_folio(inode, n, 0, &folio);  [in ext2_readdir(), local]
  281  if (IS_ERR(kaddr)) {  [in ext2_readdir()]
  [all …]
|
| /linux/fs/ufs/ |
| dir.c |
  109  static bool ufs_check_folio(struct folio *folio, char *kaddr)  [in ufs_check_folio(), argument]
  127  p = (struct ufs_dir_entry *)(kaddr + offs);  [in ufs_check_folio()]
  177  p = (struct ufs_dir_entry *)(kaddr + offs);  [in ufs_check_folio()]
  191  void *kaddr;  [in ufs_get_folio(), local]
  195  kaddr = kmap_local_folio(folio, 0);  [in ufs_get_folio()]
  197  if (!ufs_check_folio(folio, kaddr))  [in ufs_get_folio()]
  201  return kaddr;  [in ufs_get_folio()]
  204  folio_release_kmap(folio, kaddr);  [in ufs_get_folio()]
  271  char *kaddr = ufs_get_folio(dir, n, foliop);  [in ufs_find_entry(), local]
  273  if (!IS_ERR(kaddr)) {  [in ufs_find_entry()]
  [all …]
|
| /linux/arch/loongarch/include/asm/ |
| page.h |
  73   struct page *dmw_virt_to_page(unsigned long kaddr);
  74   struct page *tlb_virt_to_page(unsigned long kaddr);
  82   #define virt_to_page(kaddr) phys_to_page(__pa(kaddr))  [argument]
  94   #define virt_to_page(kaddr) \  [argument]
  96   (likely((unsigned long)kaddr < vm_map_base)) ? \
  97   dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
  103  #define virt_to_pfn(kaddr) page_to_pfn(virt_to_page(kaddr))  [argument]
  105  extern int __virt_addr_valid(volatile void *kaddr);
  106  #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))  [argument]
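
The second virt_to_page() definition above is a runtime dispatch: kernel addresses below vm_map_base are treated as direct-mapped-window (DMW) addresses and converted arithmetically by dmw_virt_to_page(), while anything else goes through the page-table walk in tlb_virt_to_page(). Callers use the macros the same way on every architecture; a small, arch-neutral usage sketch (buffer and function names hypothetical):

    #include <linux/mm.h>

    static struct page *page_of_buffer(void *buf)
    {
            if (!virt_addr_valid(buf))  /* rejects e.g. vmalloc()/ioremap() addresses */
                    return NULL;
            return virt_to_page(buf);   /* kernel virtual address -> struct page */
    }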
|
| kfence.h |
  20   char *kaddr, *vaddr;  [in arch_kfence_init_pool(), local]
  40   kaddr = kfence_pool;  [in arch_kfence_init_pool()]
  42   while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {  [in arch_kfence_init_pool()]
  43   set_page_address(virt_to_page(kaddr), vaddr);  [in arch_kfence_init_pool()]
  44   kaddr += PAGE_SIZE;  [in arch_kfence_init_pool()]
|
| io.h |
  71   #define virt_to_phys(kaddr) \  [argument]
  73   (likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) : \
  74   page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
|
| /linux/fs/minix/ |
| dir.c |
  102  char *p, *kaddr, *limit;  [in minix_readdir(), local]
  105  kaddr = dir_get_folio(inode, n, &folio);  [in minix_readdir()]
  106  if (IS_ERR(kaddr))  [in minix_readdir()]
  108  p = kaddr+offset;  [in minix_readdir()]
  109  limit = kaddr + minix_last_byte(inode, n) - chunk_size;  [in minix_readdir()]
  132  folio_release_kmap(folio, kaddr);  [in minix_readdir()]
  169  char *kaddr, *limit;  [in minix_find_entry(), local]
  171  kaddr = dir_get_folio(dir, n, foliop);  [in minix_find_entry()]
  172  if (IS_ERR(kaddr))  [in minix_find_entry()]
  175  limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;  [in minix_find_entry()]
  [all …]
|
| /linux/fs/btrfs/ |
| accessors.c |
  59   char *kaddr = folio_address(eb->folios[idx]) + oif; \
  69   return get_unaligned_le##bits(kaddr); \
  72   lebytes[0] = *kaddr; \
  73   kaddr = folio_address(eb->folios[idx + 1]); \
  74   lebytes[1] = *kaddr; \
  76   memcpy_split_src(lebytes, kaddr, \
  89   char *kaddr = folio_address(eb->folios[idx]) + oif; \
  99   put_unaligned_le##bits(val, kaddr); \
  104  *kaddr = lebytes[0]; \
  105  kaddr = folio_address(eb->folios[idx + 1]); \
  [all …]
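
These token-pasted accessors handle one awkward case: a little-endian integer in an extent buffer may straddle two folios that are not virtually contiguous, so its bytes are gathered into a scratch array before the unaligned load (and scattered after the unaligned store). A hedged sketch of the read half for a fixed 32-bit width, using plain memcpy() instead of btrfs's memcpy_split_src() helper; the names here are illustrative, not the btrfs macros:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/unaligned.h>    /* <asm/unaligned.h> on older kernels */

    static u32 read_le32_split(const u8 *part1, size_t bytes_in_part1,
                               const u8 *part2)
    {
            u8 lebytes[4];

            if (bytes_in_part1 >= 4)    /* fast path: value fully inside one folio */
                    return get_unaligned_le32(part1);

            memcpy(lebytes, part1, bytes_in_part1);             /* tail of first folio */
            memcpy(lebytes + bytes_in_part1, part2, 4 - bytes_in_part1);
            return get_unaligned_le32(lebytes);
    }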
|
| lzo.c |
  147  char *kaddr;  [in copy_compressed_data_to_page(), local]
  167  kaddr = kmap_local_folio(cur_folio, offset_in_folio(cur_folio, *cur_out));  [in copy_compressed_data_to_page()]
  168  write_compress_length(kaddr, compressed_size);  [in copy_compressed_data_to_page()]
  178  kunmap_local(kaddr);  [in copy_compressed_data_to_page()]
  191  kaddr = kmap_local_folio(cur_folio, 0);  [in copy_compressed_data_to_page()]
  193  memcpy(kaddr + offset_in_folio(cur_folio, *cur_out),  [in copy_compressed_data_to_page()]
  208  memset(kaddr + offset_in_page(*cur_out), 0,  [in copy_compressed_data_to_page()]
  213  kunmap_local(kaddr);  [in copy_compressed_data_to_page()]
  345  char *kaddr;  [in lzo_decompress_bio(), local]
  354  kaddr = kmap_local_folio(cb->compressed_folios[0], 0);  [in lzo_decompress_bio()]
  [all …]
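
Line 167 shows the map-at-offset idiom: kmap_local_folio() takes a byte offset within the folio and returns a pointer at that byte, so the segment header can be written without mapping the whole folio on HIGHMEM configurations. A small sketch of writing a 4-byte little-endian length header this way; the function name is hypothetical, and it assumes, as the real code arranges, that the header does not cross a page boundary:

    #include <linux/highmem.h>
    #include <linux/unaligned.h>    /* <asm/unaligned.h> on older kernels */

    static void write_len_header_sketch(struct folio *folio, size_t pos, u32 len)
    {
            char *kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));

            put_unaligned_le32(len, kaddr);     /* segment header: compressed size */
            kunmap_local(kaddr);
    }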
|
| /linux/arch/m68k/sun3/ |
| dvma.c |
  23   static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)  [in dvma_page(), argument]
  29   j = *(volatile unsigned long *)kaddr;  [in dvma_page()]
  30   *(volatile unsigned long *)kaddr = j;  [in dvma_page()]
  32   ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);  [in dvma_page()]
  40   return (vaddr + (kaddr & ~PAGE_MASK));  [in dvma_page()]
  44   int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,  [in dvma_map_iommu(), argument]
  56   dvma_page(kaddr, vaddr);  [in dvma_map_iommu()]
  57   kaddr += PAGE_SIZE;  [in dvma_map_iommu()]
|
| sun3dvma.c |
  262  unsigned long dvma_map_align(unsigned long kaddr, int len, int align)  [in dvma_map_align(), argument]
  271  if(!kaddr || !len) {  [in dvma_map_align()]
  277  pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);  [in dvma_map_align()]
  278  off = kaddr & ~DVMA_PAGE_MASK;  [in dvma_map_align()]
  279  kaddr &= PAGE_MASK;  [in dvma_map_align()]
  291  if(!dvma_map_iommu(kaddr, baddr, len))  [in dvma_map_align()]
  294  pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,  [in dvma_map_align()]
  319  unsigned long kaddr;  [in dvma_malloc_align(), local]
  329  if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)  [in dvma_malloc_align()]
  332  if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {  [in dvma_malloc_align()]
  [all …]
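
dvma_malloc_align() pairs a physically contiguous allocation with the IOMMU-mapping step visible at line 332. A simplified sketch of that flow, with dvma_map_sketch() as a hypothetical stand-in for dvma_map_align() and error handling reduced to freeing the pages:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    unsigned long dvma_map_sketch(unsigned long kaddr, size_t len);     /* hypothetical */

    static unsigned long dvma_alloc_sketch(size_t len)
    {
            unsigned long kaddr, baddr;

            kaddr = __get_free_pages(GFP_ATOMIC, get_order(len));
            if (!kaddr)
                    return 0;

            baddr = dvma_map_sketch(kaddr, len);    /* bus address, or 0 on failure */
            if (!baddr) {
                    free_pages(kaddr, get_order(len));
                    return 0;
            }
            return baddr;   /* device-visible address backing kaddr */
    }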
|
| /linux/drivers/acpi/ |
| nvs.c |
  78   void *kaddr;  [member]
  138  if (entry->kaddr) {  [in suspend_nvs_free()]
  140  iounmap(entry->kaddr);  [in suspend_nvs_free()]
  143  acpi_os_unmap_iomem(entry->kaddr,  [in suspend_nvs_free()]
  146  entry->kaddr = NULL;  [in suspend_nvs_free()]
  182  entry->kaddr = acpi_os_get_iomem(phys, size);  [in suspend_nvs_save()]
  183  if (!entry->kaddr) {  [in suspend_nvs_save()]
  184  entry->kaddr = acpi_os_ioremap(phys, size);  [in suspend_nvs_save()]
  185  entry->unmap = !!entry->kaddr;  [in suspend_nvs_save()]
  187  if (!entry->kaddr) {  [in suspend_nvs_save()]
  [all …]
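
suspend_nvs_save() maps each NVS region only long enough to copy it into RAM, and suspend_nvs_free() tears the mapping down by the matching route (the entry records whether the code created the mapping itself or borrowed an existing one). A reduced sketch of the save/free pairing that skips the acpi_os_get_iomem() fast path and always ioremap()s; the struct is simplified from the nvs_page list entry:

    #include <linux/errno.h>
    #include <linux/io.h>

    struct nvs_region_sketch {
            phys_addr_t phys;
            unsigned int size;
            void *data;             /* RAM copy, allocated beforehand */
            void __iomem *kaddr;    /* live mapping of the NVS area */
    };

    static int nvs_save_sketch(struct nvs_region_sketch *entry)
    {
            entry->kaddr = ioremap(entry->phys, entry->size);
            if (!entry->kaddr)
                    return -ENOMEM;
            memcpy_fromio(entry->data, entry->kaddr, entry->size);
            return 0;
    }

    static void nvs_free_sketch(struct nvs_region_sketch *entry)
    {
            if (entry->kaddr) {
                    iounmap(entry->kaddr);
                    entry->kaddr = NULL;
            }
    }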
|
| /linux/fs/erofs/ |
| xattr.c |
  15   void *kaddr;  [member]
  86   it.kaddr = erofs_bread(&it.buf, it.pos, true);  [in erofs_init_inode_xattrs()]
  87   if (IS_ERR(it.kaddr)) {  [in erofs_init_inode_xattrs()]
  88   ret = PTR_ERR(it.kaddr);  [in erofs_init_inode_xattrs()]
  92   ih = it.kaddr;  [in erofs_init_inode_xattrs()]
  107  it.kaddr = erofs_bread(&it.buf, it.pos, true);  [in erofs_init_inode_xattrs()]
  108  if (IS_ERR(it.kaddr)) {  [in erofs_init_inode_xattrs()]
  111  ret = PTR_ERR(it.kaddr);  [in erofs_init_inode_xattrs()]
  114  vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr);  [in erofs_init_inode_xattrs()]
  188  it->kaddr = erofs_bread(&it->buf, it->pos, true);  [in erofs_xattr_copy_to_buffer()]
  [all …]
|
| /linux/arch/m68k/sun3x/ |
| dvma.c |
  78   inline int dvma_map_cpu(unsigned long kaddr,  [in dvma_map_cpu(), argument]
  87   kaddr &= PAGE_MASK;  [in dvma_map_cpu()]
  92   pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);  [in dvma_map_cpu()]
  127  __pa(kaddr), vaddr);  [in dvma_map_cpu()]
  128  set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),  [in dvma_map_cpu()]
  131  kaddr += PAGE_SIZE;  [in dvma_map_cpu()]
  146  int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, int len)  [in dvma_map_iommu(), argument]
  162  dvma_entry_set(index, __pa(kaddr));  [in dvma_map_iommu()]
  167  kaddr += DVMA_PAGE_SIZE;  [in dvma_map_iommu()]
|
| /linux/arch/arm/include/asm/ |
| tlbflush.h |
  472  static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)  [in __local_flush_tlb_kernel_page(), argument]
  477  tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  478  tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  479  tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  483  tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  484  tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  485  tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);  [in __local_flush_tlb_kernel_page()]
  488  static inline void local_flush_tlb_kernel_page(unsigned long kaddr)  [in local_flush_tlb_kernel_page(), argument]
  492  kaddr &= PAGE_MASK;  [in local_flush_tlb_kernel_page()]
  497  __local_flush_tlb_kernel_page(kaddr);  [in local_flush_tlb_kernel_page()]
  [all …]
|
| memory.h |
  292  unsigned long kaddr = (unsigned long)p;  [in virt_to_pfn(), local]
  293  return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +  [in virt_to_pfn()]
  384  #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))  [argument]
  385  #define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < …  [argument]
  386  && pfn_valid(virt_to_pfn(kaddr)))
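
For a concrete reading of the virt_to_pfn() arithmetic at line 293: the visible part subtracts PAGE_OFFSET from the kernel address and shifts right by PAGE_SHIFT, giving the page offset into the linear map; the continuation (truncated in this listing) then adds a base PFN. With illustrative values only (PAGE_OFFSET = 0xC0000000, PAGE_SHIFT = 12, RAM starting at physical 0x80000000, so base PFN 0x80000), kaddr 0xC0012000 gives (0xC0012000 - 0xC0000000) >> 12 = 0x12, hence PFN 0x80012.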
|
| /linux/arch/hexagon/include/asm/ |
| page.h |
  90   #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))  [argument]
  95   #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)  [argument]
  119  static inline unsigned long virt_to_pfn(const void *kaddr)  [in virt_to_pfn(), argument]
  121  return __pa(kaddr) >> PAGE_SHIFT;  [in virt_to_pfn()]
|
| /linux/include/linux/ |
| highmem.h |
  239  void *kaddr = kmap_local_page(page);  [in clear_highpage(), local]
  240  clear_page(kaddr);  [in clear_highpage()]
  241  kunmap_local(kaddr);  [in clear_highpage()]
  246  void *kaddr = kmap_local_page(page);  [in clear_highpage_kasan_tagged(), local]
  248  clear_page(kasan_reset_tag(kaddr));  [in clear_highpage_kasan_tagged()]
  249  kunmap_local(kaddr);  [in clear_highpage_kasan_tagged()]
  272  void *kaddr = kmap_local_page(page);  [in zero_user_segments(), local]
  278  memset(kaddr + start1, 0, end1 - start1);  [in zero_user_segments()]
  281  memset(kaddr + start2, 0, end2 - start2);  [in zero_user_segments()]
  283  kunmap_local(kaddr);  [in zero_user_segments()]
  [all …]
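
These helpers are the canonical kmap_local_page() pattern: map the (possibly highmem) page, touch it through the short-lived CPU-local address, unmap. A sketch of zeroing the tail of a page the same way, ending with the flush_dcache_page() call that zero_user_segments() also issues; the helper name and offset are hypothetical:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void zero_page_tail_sketch(struct page *page, size_t from)
    {
            void *kaddr = kmap_local_page(page);    /* short-lived, CPU-local mapping */

            memset(kaddr + from, 0, PAGE_SIZE - from);
            kunmap_local(kaddr);
            flush_dcache_page(page);    /* keep D-cache coherent on aliasing caches */
    }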
|
| /linux/tools/testing/nvdimm/ |
| pmem-dax.c |
  12   long nr_pages, enum dax_access_mode mode, void **kaddr,  [in __pmem_direct_access(), argument]
  28   if (kaddr)  [in __pmem_direct_access()]
  29   *kaddr = pmem->virt_addr + offset;  [in __pmem_direct_access()]
  39   if (kaddr)  [in __pmem_direct_access()]
  40   *kaddr = pmem->virt_addr + offset;  [in __pmem_direct_access()]
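
__pmem_direct_access() implements the dax_operations ->direct_access() hook: the driver fills *kaddr with a directly addressable kernel pointer into the pmem range, and the NULL checks let callers opt out of either out-parameter. A sketch of the calling side through dax_direct_access(), with error handling trimmed:

    #include <linux/dax.h>

    static long peek_pmem_sketch(struct dax_device *dax_dev, pgoff_t pgoff)
    {
            void *kaddr;
            long avail;

            avail = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS,
                                      &kaddr, NULL);    /* pfn out-param not needed */
            if (avail < 0)
                    return avail;
            /* kaddr now addresses at least 'avail' pages of persistent memory */
            return avail;
    }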
|
| /linux/arch/arc/include/asm/ |
| page.h |
  123  static inline unsigned long virt_to_pfn(const void *kaddr)  [in virt_to_pfn(), argument]
  125  return __pa(kaddr) >> PAGE_SHIFT;  [in virt_to_pfn()]
  128  #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))  [argument]
  129  #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))  [argument]
|
| /linux/arch/m68k/include/asm/ |
| page_no.h |
  22   static inline unsigned long virt_to_pfn(const void *kaddr)  [in virt_to_pfn(), argument]
  24   return __pa(kaddr) >> PAGE_SHIFT;  [in virt_to_pfn()]
  35   #define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET) && \  [argument]
  36   ((unsigned long)(kaddr) < memory_end))
|
| /linux/arch/mips/include/asm/ |
| page.h |
  208  #define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))  [argument]
  209  #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))  [argument]
  211  extern bool __virt_addr_valid(const volatile void *kaddr);
  212  #define virt_addr_valid(kaddr) \  [argument]
  213  __virt_addr_valid((const volatile void *) (kaddr))
|
| /linux/arch/x86/include/asm/ |
| page.h |
  68   #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)  [argument]
  69   extern bool __virt_addr_valid(unsigned long kaddr);
  70   #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))  [argument]
|
| /linux/arch/csky/include/asm/ |
| page.h |
  33   #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \  [argument]
  34   (void *)(kaddr) < high_memory)
  73   static inline unsigned long virt_to_pfn(const void *kaddr)  [in virt_to_pfn()]
  75   return __pa(kaddr) >> PAGE_SHIFT;  [in virt_to_pfn()]
|