Lines Matching full:order
85 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
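The comment above describes the order in which the page allocator falls back when the preferred zone of a request is exhausted. As an illustrative sketch (the alloc_pages() wrapper and GFP_HIGHUSER_MOVABLE are assumed here, not part of this listing), a movable allocation prefers ZONE_MOVABLE and then walks HIGHMEM, NORMAL, DMA32 and DMA:

#include <linux/gfp.h>

/*
 * Illustrative only: __GFP_MOVABLE makes ZONE_MOVABLE the preferred zone;
 * when it is empty the allocator falls back through HIGHMEM => NORMAL =>
 * DMA32 => DMA, as the comment above describes.
 */
static struct page *movable_page_example(void)
{
	return alloc_pages(GFP_HIGHUSER_MOVABLE, 0);	/* one order-0 page */
}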
218 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
221 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
224 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
228 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
243 /* Bulk allocate order-0 pages */
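This comment introduces the bulk allocator, which fills an array with order-0 pages in a single call instead of looping over single-page allocations. A hedged sketch: the wrapper has been spelled alloc_pages_bulk_array() in older kernels and alloc_pages_bulk() in recent ones; this assumes the latter, taking (gfp, nr_pages, page array) and returning the number of populated entries.

#include <linux/gfp.h>

/* Sketch: request 16 order-0 pages in one call; fewer may be returned. */
static unsigned long bulk_alloc_example(struct page **pages)
{
	/* already-populated slots are skipped, so callers pass a zeroed array */
	return alloc_pages_bulk(GFP_KERNEL, 16, pages);
}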
279 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument
284 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof()
290 struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node_noprof() argument
295 return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof()
306 unsigned int order) in alloc_pages_node_noprof() argument
311 return __alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_node_noprof()
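These _noprof variants are normally reached through alloc_hooks() wrappers such as alloc_pages_node(), which layer allocation profiling on top; that wrapper name is assumed here rather than shown in the listing. A usage sketch for an order-2 allocation pinned to one NUMA node:

#include <linux/gfp.h>

/* Sketch: 4 contiguous pages (order 2) from node `nid`; may return NULL. */
static struct page *node_alloc_example(int nid)
{
	return alloc_pages_node(nid, GFP_KERNEL, 2);
	/* release later with __free_pages(page, 2) */
}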
317 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
318 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
319 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
321 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
324 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) in alloc_pages_noprof() argument
326 return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); in alloc_pages_noprof()
328 static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) in folio_alloc_noprof() argument
330 return __folio_alloc_node_noprof(gfp, order, numa_node_id()); in folio_alloc_noprof()
332 static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, in folio_alloc_mpol_noprof() argument
335 return folio_alloc_noprof(gfp, order); in folio_alloc_mpol_noprof()
337 #define vma_alloc_folio_noprof(gfp, order, vma, addr) \ argument
338 folio_alloc_noprof(gfp, order)
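With CONFIG_NUMA disabled, the #define above collapses vma_alloc_folio_noprof() to plain folio_alloc_noprof(), ignoring the VMA and address. The sketch below assumes the usual folio_alloc()/folio_put() wrappers around the _noprof functions listed here:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: allocate an order-1 (two-page) folio, then drop the reference. */
static int folio_example(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 1);

	if (!folio)
		return -ENOMEM;
	folio_put(folio);	/* frees the folio once the last reference is gone */
	return 0;
}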
357 struct page *try_alloc_pages_noprof(int nid, unsigned int order);
360 extern unsigned long __get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
378 #define __get_dma_pages(gfp_mask, order) \ argument
379 __get_free_pages((gfp_mask) | GFP_DMA, (order))
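__get_dma_pages() is simply __get_free_pages() with GFP_DMA OR-ed in, so it hands back a kernel virtual address from ZONE_DMA rather than a struct page. A hedged sketch of the address-based allocate/free pairing (free_pages() is declared a few lines below):

#include <linux/errno.h>
#include <linux/gfp.h>

/* Sketch: one page from ZONE_DMA, handled by virtual address. */
static int dma_zone_page_example(void)
{
	unsigned long addr = __get_dma_pages(GFP_KERNEL, 0);	/* order 0 */

	if (!addr)
		return -ENOMEM;
	/* ... fill the buffer for a legacy device that needs low memory ... */
	free_pages(addr, 0);	/* address-based free with the same order */
	return 0;
}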
381 extern void __free_pages(struct page *page, unsigned int order);
382 extern void free_pages_nolock(struct page *page, unsigned int order);
383 extern void free_pages(unsigned long addr, unsigned int order);
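The free side must match the allocation flavor and order: __free_pages() takes the struct page returned by alloc_pages()-style calls, free_pages() takes the virtual address returned by __get_free_pages(), and free_pages_nolock() appears intended for pages from the non-sleeping try_alloc_pages() path. A short pairing sketch (the alloc_pages() and __get_free_pages() wrapper names are assumed, not shown in this listing):

#include <linux/gfp.h>

/* Sketch: each free matches its allocation in both flavor and order. */
static void alloc_free_pairs_example(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);

	if (page)
		__free_pages(page, 2);	/* struct page flavor, order 2 */
	if (addr)
		free_pages(addr, 1);	/* virtual-address flavor, order 1 */
}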
439 static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp, in folio_alloc_gigantic_noprof() argument
444 if (WARN_ON(!order || !(gfp & __GFP_COMP))) in folio_alloc_gigantic_noprof()
447 page = alloc_contig_pages_noprof(1 << order, gfp, nid, node); in folio_alloc_gigantic_noprof()
452 static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp, in folio_alloc_gigantic_noprof() argument
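folio_alloc_gigantic_noprof() builds a folio larger than the buddy allocator's maximum order by going through alloc_contig_pages_noprof(); the WARN_ON in the CONFIG_CONTIG_ALLOC version above requires a non-zero order and __GFP_COMP, and the stub variant simply fails when contiguous allocation is not configured. A hedged sketch, assuming the usual folio_alloc_gigantic() alloc_hooks() wrapper:

#include <linux/gfp.h>

/*
 * Sketch: a 1 GiB gigantic folio is order 18 with 4 KiB base pages.
 * A non-zero order and __GFP_COMP are required by the WARN_ON above;
 * NULL is returned when contiguous allocation cannot satisfy the request.
 */
static struct folio *gigantic_example(int nid)
{
	return folio_alloc_gigantic(18, GFP_KERNEL | __GFP_COMP, nid, NULL);
}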