
Searched refs:nr_pages (Results 1 – 25 of 430) sorted by relevance


/linux/io_uring/
memmap.c
18 static void *io_mem_alloc_compound(struct page **pages, int nr_pages, in io_mem_alloc_compound() argument
34 for (i = 0; i < nr_pages; i++) in io_mem_alloc_compound()
42 unsigned long start, end, nr_pages; in io_pin_pages() local
53 nr_pages = end - start; in io_pin_pages()
54 if (WARN_ON_ONCE(!nr_pages)) in io_pin_pages()
56 if (WARN_ON_ONCE(nr_pages > INT_MAX)) in io_pin_pages()
59 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in io_pin_pages()
63 ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, in io_pin_pages()
66 if (ret == nr_pages) { in io_pin_pages()
67 *npages = nr_pages; in io_pin_pages()
[all …]
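
The io_pin_pages() hits above show the canonical long-term GUP pattern: derive nr_pages from the user address range, allocate a page-pointer array, pin with FOLL_LONGTERM, and unwind a partial pin. A minimal sketch of that pattern, assuming a kernel context; my_pin_user_range() is a hypothetical helper, not io_uring's actual code:

    /* Hypothetical helper sketching the io_pin_pages() pattern above. */
    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static struct page **my_pin_user_range(unsigned long uaddr, size_t len,
                                           int *npages)
    {
        unsigned long start = uaddr >> PAGE_SHIFT;
        unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long nr_pages = end - start;   /* pages spanned by the range */
        struct page **pages;
        int ret;

        if (WARN_ON_ONCE(!nr_pages || nr_pages > INT_MAX))
            return ERR_PTR(-EINVAL);

        pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return ERR_PTR(-ENOMEM);

        ret = pin_user_pages_fast(uaddr, nr_pages,
                                  FOLL_WRITE | FOLL_LONGTERM, pages);
        if (ret != nr_pages) {
            /* Short pin: drop whatever was pinned before failing. */
            if (ret > 0)
                unpin_user_pages(pages, ret);
            kvfree(pages);
            return ERR_PTR(ret < 0 ? ret : -EFAULT);
        }
        *npages = nr_pages;
        return pages;
    }
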
/linux/include/linux/
hugetlb_cgroup.h
127 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
129 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
131 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
134 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
137 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
139 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
142 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
144 extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
152 unsigned long nr_pages,
162 unsigned long nr_pages, in hugetlb_cgroup_uncharge_file_region() argument
[all …]
page_counter.h
71 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
72 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
74 unsigned long nr_pages,
76 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
77 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
78 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
81 unsigned long nr_pages) in page_counter_set_high() argument
83 WRITE_ONCE(counter->high, nr_pages); in page_counter_set_high()
86 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
88 unsigned long *nr_pages);
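
page_counter.h declares the charge/uncharge primitives used by the cgroup controllers; page_counter_try_charge() additionally reports which counter in the hierarchy hit its limit. A minimal usage sketch, assuming a counter already initialised elsewhere; my_account_pages() is hypothetical:

    /* Hypothetical caller of the page_counter API declared above. */
    #include <linux/errno.h>
    #include <linux/page_counter.h>

    static int my_account_pages(struct page_counter *counter,
                                unsigned long nr_pages)
    {
        struct page_counter *fail;  /* set to the level that hit its max */

        if (!page_counter_try_charge(counter, nr_pages, &fail))
            return -ENOMEM;
        /* ... use the charged pages ... */
        page_counter_uncharge(counter, nr_pages);
        return 0;
    }
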
memory_hotplug.h
123 long nr_pages);
125 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
127 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
128 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
155 extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
159 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
164 unsigned long nr_pages, struct mhp_params *params) in add_pages() argument
166 return __add_pages(nid, start_pfn, nr_pages, params); in add_pages()
169 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
280 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
[all …]
rmap.h
398 const struct page *page, int nr_pages, enum pgtable_level level) in __folio_rmap_sanity_checks() argument
415 VM_WARN_ON_ONCE(nr_pages <= 0); in __folio_rmap_sanity_checks()
417 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); in __folio_rmap_sanity_checks()
429 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
437 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio); in __folio_rmap_sanity_checks()
470 void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
478 void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
486 void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
559 struct page *page, int nr_pages, struct vm_area_struct *dst_vma, in __folio_dup_file_rmap() argument
562 const int orig_nr_pages = nr_pages; in __folio_dup_file_rmap()
[all …]
/linux/mm/
page_counter.c
54 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) in page_counter_cancel() argument
58 new = atomic_long_sub_return(nr_pages, &counter->usage); in page_counter_cancel()
61 new, nr_pages)) { in page_counter_cancel()
76 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) in page_counter_charge() argument
84 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_charge()
119 unsigned long nr_pages, in page_counter_try_charge() argument
142 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_try_charge()
144 atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
169 page_counter_cancel(c, nr_pages); in page_counter_try_charge()
179 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) in page_counter_uncharge() argument
[all …]
memory_hotplug.c
60 unsigned long nr_pages = PFN_UP(memory_block_memmap_size()); in memory_block_memmap_on_memory_pages() local
69 return pageblock_align(nr_pages); in memory_block_memmap_on_memory_pages()
70 return nr_pages; in memory_block_memmap_on_memory_pages()
319 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages) in check_pfn_span() argument
336 if (!IS_ALIGNED(pfn | nr_pages, min_align)) in check_pfn_span()
389 int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
392 const unsigned long end_pfn = pfn + nr_pages; in __add_pages()
400 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); in __add_pages()
407 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
414 if (check_pfn_span(pfn, nr_pages)) { in __add_pages()
[all …]
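
check_pfn_span() in the hits above validates a hot-plugged range with a single test: OR-ing pfn and nr_pages lets one IS_ALIGNED() check cover both the start and the length. A small illustration of that trick; span_ok() is hypothetical, and min_align stands in for the kernel's sub-section or section page count:

    /* Both pfn and nr_pages must be multiples of min_align (a power of
     * two); if either is not, the low bits of their OR are non-zero. */
    #include <stdbool.h>

    #define IS_ALIGNED(x, a)  (((x) & ((typeof(x))(a) - 1)) == 0)

    static bool span_ok(unsigned long pfn, unsigned long nr_pages,
                        unsigned long min_align)
    {
        return nr_pages && IS_ALIGNED(pfn | nr_pages, min_align);
    }
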
percpu-km.c
55 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
65 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
71 for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
78 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
89 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
98 __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
109 size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
117 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
118 alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
120 if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
[all …]
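
percpu-km backs each chunk with one physically contiguous allocation, so nr_pages is rounded up to a power of two for alloc_pages(); pcpu_verify_alloc_info() warns when that rounding wastes memory. A sketch of the waste computation, assuming linux/log2.h semantics; wasted_pages() is hypothetical:

    /* When nr_pages is not a power of two, allocating
     * order_base_2(nr_pages) pages over-allocates. */
    #include <linux/log2.h>

    static unsigned long wasted_pages(unsigned long nr_pages)
    {
        unsigned long alloc_pages = roundup_pow_of_two(nr_pages);

        /* e.g. nr_pages = 12 allocates 16 pages, wasting 4 */
        return alloc_pages - nr_pages;
    }
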
gup_test.c
11 unsigned long nr_pages, unsigned int gup_test_flags) in put_back_pages() argument
18 for (i = 0; i < nr_pages; i++) in put_back_pages()
25 unpin_user_pages(pages, nr_pages); in put_back_pages()
29 unpin_user_pages(pages, nr_pages); in put_back_pages()
31 for (i = 0; i < nr_pages; i++) in put_back_pages()
40 unsigned long nr_pages) in verify_dma_pinned() argument
49 for (i = 0; i < nr_pages; i++) { in verify_dma_pinned()
70 unsigned long nr_pages) in dump_pages_test() argument
80 if (gup->which_pages[i] > nr_pages) { in dump_pages_test()
104 unsigned long i, nr_pages, addr, next; in __gup_test_ioctl() local
[all …]
sparse.c
180 unsigned long nr_pages) in subsection_mask_set() argument
183 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
188 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
190 int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
197 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
207 nr_pages -= pfns; in subsection_map_init()
211 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
418 unsigned long nr_pages, int nid, struct vmem_altmap *altmap, in __populate_section_memmap() argument
670 unsigned long nr_pages, int nid, struct vmem_altmap *altmap, in populate_section_memmap() argument
673 return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap); in populate_section_memmap()
[all …]
hugetlb_cgroup.c
197 unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
211 nr_pages = folio_nr_pages(folio); in hugetlb_cgroup_move_parent()
215 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
219 page_counter_cancel(counter, nr_pages); in hugetlb_cgroup_move_parent()
261 static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in __hugetlb_cgroup_charge_cgroup() argument
282 nr_pages, &counter)) { in __hugetlb_cgroup_charge_cgroup()
298 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
301 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false); in hugetlb_cgroup_charge_cgroup()
304 int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup_rsvd() argument
307 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true); in hugetlb_cgroup_charge_cgroup_rsvd()
[all …]
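
The declarations above follow a two-phase pattern: charge the hugetlb cgroup first, then commit the charge to the folio once allocation succeeds. A hedged sketch under the assumption that commit takes the folio as declared in hugetlb_cgroup.h; my_charge_hugetlb_folio() is hypothetical and error handling is abbreviated:

    /* Hypothetical two-phase charge of a hugetlb folio. */
    #include <linux/hugetlb_cgroup.h>
    #include <linux/mm.h>

    static int my_charge_hugetlb_folio(int idx, struct folio *folio)
    {
        unsigned long nr_pages = folio_nr_pages(folio);
        struct hugetlb_cgroup *h_cg;
        int ret;

        ret = hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg);
        if (ret)
            return ret;
        hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio);
        return 0;
    }
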
mmu_gather.c
60 unsigned int nr_pages = 1; in tlb_flush_rmap_batch() local
64 nr_pages = encoded_nr_pages(pages[++i]); in tlb_flush_rmap_batch()
66 folio_remove_rmap_ptes(page_folio(page), page, nr_pages, in tlb_flush_rmap_batch()
104 unsigned int nr, nr_pages; in __tlb_batch_free_encoded_pages() local
125 for (nr = 0, nr_pages = 0; in __tlb_batch_free_encoded_pages()
126 nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE; in __tlb_batch_free_encoded_pages()
130 nr_pages += encoded_nr_pages(pages[++nr]); in __tlb_batch_free_encoded_pages()
132 nr_pages++; in __tlb_batch_free_encoded_pages()
165 struct page *page, unsigned int nr_pages, bool delay_rmap, in __tlb_remove_folio_pages_size() argument
175 VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE); in __tlb_remove_folio_pages_size()
[all …]
cma.h
84 void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
85 void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
86 void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages);
89 unsigned long nr_pages) {}; in cma_sysfs_account_success_pages() argument
91 unsigned long nr_pages) {}; in cma_sysfs_account_fail_pages() argument
93 unsigned long nr_pages) {}; in cma_sysfs_account_release_pages() argument
process_vm_access.c
83 unsigned long nr_pages; in process_vm_rw_single_vec() local
90 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
95 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
96 int pinned_pages = min_t(unsigned long, nr_pages, PVM_MAX_USER_PAGES); in process_vm_rw_single_vec()
123 nr_pages -= pinned_pages; in process_vm_rw_single_vec()
162 unsigned long nr_pages = 0; in process_vm_rw_core() local
178 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
182 if (nr_pages == 0) in process_vm_rw_core()
185 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
189 sizeof(struct page *)*nr_pages), in process_vm_rw_core()
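
process_vm_rw_single_vec() counts the pages spanned by (addr, len) as last_page - first_page + 1. A small userspace check of that arithmetic, with PAGE_SIZE hard-coded to 4 KiB for the demonstration:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long addr = 0x1ff0, len = 0x20;  /* crosses one boundary */
        unsigned long nr_pages =
            (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

        printf("nr_pages = %lu\n", nr_pages);     /* prints 2 */
        return 0;
    }
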
memcontrol.c
135 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
141 unsigned int nr_pages; in obj_cgroup_release() local
166 nr_pages = nr_bytes >> PAGE_SHIFT; in obj_cgroup_release()
168 if (nr_pages) { in obj_cgroup_release()
172 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in obj_cgroup_release()
173 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_release()
175 memcg_uncharge(memcg, nr_pages); in obj_cgroup_release()
1272 int zid, int nr_pages) in mem_cgroup_update_lru_size() argument
1284 if (nr_pages < 0) in mem_cgroup_update_lru_size()
1285 *lru_size += nr_pages; in mem_cgroup_update_lru_size()
[all …]
/linux/fs/iomap/
swapfile.c
16 unsigned long nr_pages; /* number of pages collected */ member
30 unsigned long nr_pages; in iomap_swapfile_add_extent() local
37 if (unlikely(isi->nr_pages >= isi->sis->max)) in iomap_swapfile_add_extent()
39 max_pages = isi->sis->max - isi->nr_pages; in iomap_swapfile_add_extent()
52 nr_pages = next_ppage - first_ppage; in iomap_swapfile_add_extent()
53 nr_pages = min(nr_pages, max_pages); in iomap_swapfile_add_extent()
69 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); in iomap_swapfile_add_extent()
73 isi->nr_pages += nr_pages; in iomap_swapfile_add_extent()
182 if (isi.nr_pages == 0) { in iomap_swapfile_activate()
188 sis->max = isi.nr_pages; in iomap_swapfile_activate()
[all …]
/linux/drivers/xen/
balloon.c
390 static enum bp_state increase_reservation(unsigned long nr_pages) in increase_reservation() argument
396 if (nr_pages > ARRAY_SIZE(frame_list)) in increase_reservation()
397 nr_pages = ARRAY_SIZE(frame_list); in increase_reservation()
400 for (i = 0; i < nr_pages; i++) { in increase_reservation()
402 nr_pages = i; in increase_reservation()
410 rc = xenmem_reservation_increase(nr_pages, frame_list); in increase_reservation()
433 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) in decrease_reservation() argument
441 if (nr_pages > ARRAY_SIZE(frame_list)) in decrease_reservation()
442 nr_pages = ARRAY_SIZE(frame_list); in decrease_reservation()
444 for (i = 0; i < nr_pages; i++) { in decrease_reservation()
[all …]
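
increase_reservation() and decrease_reservation() both clamp nr_pages to ARRAY_SIZE(frame_list), handling at most one batch per call while the caller loops on the remainder. A minimal sketch of that batching idiom; process_batch() and the 64-entry array are illustrative stand-ins, not balloon.c's actual sizes:

    #include <linux/kernel.h>

    static unsigned long frame_list[64];  /* illustrative batch size */

    static unsigned long process_batch(unsigned long nr_pages)
    {
        if (nr_pages > ARRAY_SIZE(frame_list))
            nr_pages = ARRAY_SIZE(frame_list);
        /* fill frame_list[0..nr_pages-1], issue one reservation call */
        return nr_pages;  /* caller subtracts this and retries */
    }
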
unpopulated-alloc.c
34 static int fill_list(unsigned int nr_pages) in fill_list() argument
39 unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); in fill_list()
158 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_alloc_unpopulated_pages() argument
169 return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
172 if (list_count < nr_pages) { in xen_alloc_unpopulated_pages()
173 ret = fill_list(nr_pages - list_count); in xen_alloc_unpopulated_pages()
178 for (i = 0; i < nr_pages; i++) { in xen_alloc_unpopulated_pages()
214 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_free_unpopulated_pages() argument
219 xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
224 for (i = 0; i < nr_pages; i++) { in xen_free_unpopulated_pages()
/linux/arch/arm64/kvm/hyp/nvhe/
setup.c
38 unsigned long nr_pages; in divide_memory_pool() local
42 nr_pages = pkvm_selftest_pages(); in divide_memory_pool()
43 selftest_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
44 if (nr_pages && !selftest_base) in divide_memory_pool()
47 nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page)); in divide_memory_pool()
48 vmemmap_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
52 nr_pages = hyp_vm_table_pages(); in divide_memory_pool()
53 vm_table_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
57 nr_pages = hyp_s1_pgtable_pages(); in divide_memory_pool()
58 hyp_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
[all …]
/linux/drivers/net/ethernet/broadcom/bnge/
bnge_rmem.c
44 for (i = 0; i < rmem->nr_pages; i++) { in bnge_free_ring()
55 size_t pg_tbl_size = rmem->nr_pages * 8; in bnge_free_ring()
78 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { in bnge_alloc_ring()
79 size_t pg_tbl_size = rmem->nr_pages * 8; in bnge_alloc_ring()
90 for (i = 0; i < rmem->nr_pages; i++) { in bnge_alloc_ring()
104 if (rmem->nr_pages > 1 || rmem->depth > 0) { in bnge_alloc_ring()
105 if (i == rmem->nr_pages - 2 && in bnge_alloc_ring()
108 else if (i == rmem->nr_pages - 1 && in bnge_alloc_ring()
152 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE); in bnge_alloc_ctx_pg_tbls()
153 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { in bnge_alloc_ctx_pg_tbls()
[all …]
/linux/arch/arm64/kvm/hyp/include/nvhe/
mem_protect.h
38 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
39 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
40 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
41 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
42 int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
44 int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
46 int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
47 int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm);
/linux/include/xen/
xen.h
72 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
73 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
78 static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages, in xen_alloc_unpopulated_pages() argument
81 return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
83 static inline void xen_free_unpopulated_pages(unsigned int nr_pages, in xen_free_unpopulated_pages() argument
86 xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
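
xen.h falls back to the balloon allocator when unpopulated-page support is compiled out, so callers use one API either way. A minimal usage sketch, assuming a Xen-enabled kernel; my_grab_unpopulated() and the array size are hypothetical:

    #include <linux/kernel.h>
    #include <xen/xen.h>

    static int my_grab_unpopulated(void)
    {
        struct page *pages[4];
        int ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);

        if (ret < 0)
            return ret;
        /* ... map foreign grants into these pages ... */
        xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
        return 0;
    }
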
/linux/net/rds/
info.c
163 unsigned long nr_pages = 0; in rds_info_getsockopt() local
187 nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) in rds_info_getsockopt()
190 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
195 ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
196 if (ret != nr_pages) { in rds_info_getsockopt()
198 nr_pages = ret; in rds_info_getsockopt()
200 nr_pages = 0; in rds_info_getsockopt()
205 rdsdebug("len %d nr_pages %lu\n", len, nr_pages); in rds_info_getsockopt()
238 unpin_user_pages(pages, nr_pages); in rds_info_getsockopt()
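
rds_info_getsockopt() computes nr_pages with page-mask arithmetic rather than division: align the end up, align the start down, and shift. A userspace check that this agrees with the divide-based form seen in process_vm_access.c (4 KiB pages assumed):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long start = 0x1ff0, len = 0x20;
        unsigned long a = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
                          >> PAGE_SHIFT;
        unsigned long b = (start + len - 1) / PAGE_SIZE
                          - start / PAGE_SIZE + 1;

        assert(a == b);
        printf("nr_pages = %lu\n", a);  /* prints 2 */
        return 0;
    }
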
/linux/drivers/firmware/efi/libstub/
relocate.c
28 unsigned long nr_pages; in efi_low_alloc_above() local
45 nr_pages = size / EFI_PAGE_SIZE; in efi_low_alloc_above()
63 if (desc->num_pages < nr_pages) in efi_low_alloc_above()
77 EFI_LOADER_DATA, nr_pages, &start); in efi_low_alloc_above()
121 unsigned long nr_pages; in efi_relocate_kernel() local
139 nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; in efi_relocate_kernel()
141 EFI_LOADER_DATA, nr_pages, &efi_addr); in efi_relocate_kernel()
/linux/kernel/events/
ring_buffer.c
179 if (rb->nr_pages) { in __perf_output_begin()
243 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
339 if (!rb->nr_pages) in ring_buffer_init()
678 pgoff_t pgoff, int nr_pages, long watermark, int flags) in rb_alloc_aux() argument
695 if (nr_pages <= 0) in rb_alloc_aux()
706 (unsigned long)nr_pages << (PAGE_SHIFT - 1)); in rb_alloc_aux()
721 max_order = ilog2(nr_pages); in rb_alloc_aux()
729 if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_PAGE_ORDER) in rb_alloc_aux()
731 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
737 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
[all …]
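
__perf_output_begin() maps a buffer offset to a page with a mask, which is why the ring buffer requires a power-of-two nr_pages. A small userspace illustration of the idiom, with arbitrarily chosen values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 8;     /* must be a power of two */
        unsigned int page_shift = 12;
        unsigned long offset = 9UL << page_shift;  /* page 9 wraps to 1 */
        unsigned long page = (offset >> page_shift) & (nr_pages - 1);

        printf("page = %lu\n", page);   /* prints 1 */
        return 0;
    }
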
