Call sites of alloc_pages_node() across the Linux tree, grouped by directory (line number, matched code, enclosing function):

/linux/arch/x86/kernel/
  espfix_64.c
    165: struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);  in init_espfix_ap()
    177: struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);  in init_espfix_ap()
    187: stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));  in init_espfix_ap()
  irq_32.c
    115: ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);  in irq_init_percpu_irqstack()
    118: ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);  in irq_init_percpu_irqstack()

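The two x86 files above grab per-CPU pages on each CPU's home node. A minimal sketch of that node-local pattern, assuming normal fallback behavior (no __GFP_THISNODE); alloc_node_page() is a hypothetical helper, not a kernel API:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *alloc_node_page(int node, gfp_t gfp)
    {
            /*
             * Prefer the requested node; without __GFP_THISNODE the
             * allocator is free to fall back to other nodes.
             */
            struct page *page = alloc_pages_node(node, gfp, 0);

            return page ? page_address(page) : NULL;
    }
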
/linux/arch/loongarch/kernel/
  irq.c
    118: page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);  in init_IRQ()

/linux/kernel/dma/
  ops_helpers.c
    70: page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));  in dma_common_alloc_pages()
  direct.c
    142: page = alloc_pages_node(node, gfp, get_order(size));  in __dma_direct_alloc_pages()

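Both DMA call sites share one pattern: convert a byte count to a page order with get_order() and prefer the device's NUMA node via dev_to_node(). A sketch under those assumptions; dma_alloc_node_pages() is an illustrative name, not the actual helper in these files:

    #include <linux/device.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *dma_alloc_node_pages(struct device *dev,
                                             size_t size, gfp_t gfp)
    {
            /* Round the byte count up to a whole power-of-two page order. */
            unsigned int order = get_order(size);

            /* Prefer memory on the NUMA node the device is attached to. */
            return alloc_pages_node(dev_to_node(dev), gfp, order);
    }
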
/linux/drivers/hv/
  hv_proc.c
    57: pages[i] = alloc_pages_node(node, GFP_KERNEL, order);  in hv_call_deposit_pages()
  channel.c
    179: page = alloc_pages_node(cpu_to_node(newchannel->target_cpu), ...  in vmbus_alloc_ring()

/linux/arch/sparc/kernel/
  iommu.c
    122: page = alloc_pages_node(numa_node, GFP_KERNEL, 0);  in iommu_table_init()
    133: page = alloc_pages_node(numa_node, GFP_KERNEL, order);  in iommu_table_init()
    214: page = alloc_pages_node(nid, gfp, order);  in dma_4u_alloc_coherent()

/linux/arch/loongarch/mm/
  tlb.c
    298: page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));  in setup_tlb_handler()

/linux/mm/
  percpu-vm.c
    95: *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);  in pcpu_alloc_pages()
  hugetlb_vmemmap.c
    341: walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);  in vmemmap_remap_free()
    396: page = alloc_pages_node(nid, gfp_mask, 0);  in alloc_vmemmap_page_list()
  sparse-vmemmap.c
    68: page = alloc_pages_node(node, gfp_mask, order);  in vmemmap_alloc_block()

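The mm/ hits are all order-0 allocations keyed by a node id; alloc_vmemmap_page_list() repeats one to build a page list. A rough sketch of that loop, assuming the caller frees the partial list on error; fill_page_list() is a hypothetical name:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/mm.h>

    static int fill_page_list(int nid, gfp_t gfp, unsigned long nr_pages,
                              struct list_head *list)
    {
            unsigned long i;

            for (i = 0; i < nr_pages; i++) {
                    struct page *page = alloc_pages_node(nid, gfp, 0);

                    if (!page)
                            return -ENOMEM; /* caller unwinds the partial list */
                    /* A not-yet-mapped page's lru field is free for chaining. */
                    list_add_tail(&page->lru, list);
            }
            return 0;
    }
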
/linux/kernel/events/
  ring_buffer.c
    624: page = alloc_pages_node(node, PERF_AUX_GFP, order);  in rb_alloc_aux_page()
    822: page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);  in perf_mmap_alloc_page()

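rb_alloc_aux_page() requests a multi-page order and retries with smaller orders when the allocation fails. A sketch of that backoff loop; the exact flags in PERF_AUX_GFP are an assumption here, as is the helper name:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Assumed GFP mix: zeroed pages; fail fast and quietly at high order. */
    #define AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

    static struct page *alloc_aux_like(int node, int order)
    {
            struct page *page;

            /* On failure, drop to the next lower order (half the size). */
            do {
                    page = alloc_pages_node(node, AUX_GFP, order);
            } while (!page && order--);

            return page;
    }
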
/linux/arch/powerpc/perf/
  imc-pmu.c
    614: page = alloc_pages_node(nid, ...  in core_imc_mem_init()
    931: page = alloc_pages_node(nid, ...  in thread_imc_mem_alloc()
    1214: page = alloc_pages_node(phys_id, ...  in trace_imc_mem_alloc()

/linux/include/linux/
  gfp.h
    314: #define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))  (macro definition)

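The gfp.h hit is the definition itself: alloc_pages_node() is a macro that routes through alloc_hooks() to the _noprof variant so memory-allocation profiling can attribute pages to the call site; callers treat it as an ordinary function taking a node id, GFP flags, and an order. A minimal usage sketch (demo_alloc() is illustrative):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/numa.h>

    static int demo_alloc(void)
    {
            /* Order 0 is one page; NUMA_NO_NODE means "no node preference". */
            struct page *page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);

            if (!page)
                    return -ENOMEM;

            __free_pages(page, 0);
            return 0;
    }
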
/linux/arch/powerpc/platforms/powernv/
  pci-ioda-tce.c
    67: tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN, ...  in pnv_alloc_tce_level()

/linux/drivers/misc/sgi-gru/
  grufile.c
    267: page = alloc_pages_node(nid, GFP_KERNEL, order);  in gru_init_tables()

/linux/drivers/net/ethernet/mellanox/mlx4/
  icm.c
    103: page = alloc_pages_node(node, gfp_mask, order);  in mlx4_alloc_icm_pages()

/linux/drivers/net/ethernet/fungible/funcore/
  fun_queue.c
    194: rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);  in fun_fill_rq()

/linux/drivers/net/ethernet/amd/xgbe/
  xgbe-desc.c
    191: pages = alloc_pages_node(node, gfp, order);  in xgbe_alloc_pages()

/linux/drivers/gpu/drm/msm/
  msm_iommu.c
    382: p = alloc_pages_node(dev_to_node(pagetable->iommu_dev), ...  in msm_iommu_pagetable_alloc_pt()

/linux/arch/x86/kvm/svm/
  svm.h
    875: return alloc_pages_node(node, gfp | __GFP_ZERO, 0);  in snp_safe_alloc_page_node()

/linux/drivers/net/ethernet/mellanox/mlx5/core/
  pagealloc.c
    300: page = alloc_pages_node(nid, GFP_HIGHUSER, 0);  in alloc_system_page()

/linux/drivers/gpu/drm/ttm/
  ttm_pool.c
    152: p = alloc_pages_node(pool->nid, gfp_flags, order);  in ttm_pool_alloc_page()

/linux/drivers/iommu/
  dma-iommu.c
    902: page = alloc_pages_node(nid, alloc_flags, order);  in __iommu_dma_alloc_pages()
    1612: page = alloc_pages_node(node, gfp, get_order(alloc_size));  in iommu_dma_alloc_pages()