/linux/mm/ |
H A D | show_mem.c |
  119 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) in show_mem_node_skip() argument
  129 if (!nodemask) in show_mem_node_skip()
  130 nodemask = &cpuset_current_mems_allowed; in show_mem_node_skip()
  132 return !node_isset(nid, *nodemask); in show_mem_node_skip()
  180 static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) in show_free_areas() argument
  190 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
  227 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) in show_free_areas()
  290 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
  355 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
  385 if (show_mem_node_skip(filter, nid, nodemask)) in show_free_areas()
  395 __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) in __show_mem() argument
  [all...] |
H A D | hugetlb_cma.h | 8 int nid, nodemask_t *nodemask); 22 gfp_t gfp_mask, int nid, nodemask_t *nodemask) in hugetlb_cma_alloc_folio() argument
|
H A D | mempolicy.c |
  89 #include <linux/nodemask.h>
  123 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
  313 * @mask: a pointer to a nodemask representing the allowed nodes.
  395 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
  453 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). in mpol_new()
  454 * All other modes require a valid pointer to a non-empty nodemask. in mpol_new()
  526 * OOM/allocation failure due to parallel nodemask modification.
  551 * Rebind each vma in mm to new nodemask.
  1032 * Return nodemask for policy for get_mempolicy() query
  1373 nodemask_t *nodemask; in alloc_migration_target_by_mpol() local
  2132 nodemask_t nodemask; in weighted_interleave_nid() local
  2176 nodemask_t nodemask; in interleave_nid() local
  2198 nodemask_t *nodemask = NULL; in policy_nodemask() local
  2254 huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
  2341 alloc_pages_preferred_many(gfp_t gfp, unsigned int order, int nid, nodemask_t *nodemask) in alloc_pages_preferred_many() argument
  2374 nodemask_t *nodemask; in alloc_pages_mpol() local
  2704 nodemask_t *nodemask; in alloc_pages_bulk_mempolicy_noprof() local
  [all...]
H A D | oom_kill.c |
  95 const nodemask_t *mask = oc->nodemask; in oom_cpuset_eligible()
  282 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in in constrained_alloc()
  286 if (oc->nodemask && in constrained_alloc()
  287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
  289 for_each_node_mask(nid, *oc->nodemask) in constrained_alloc()
  296 highest_zoneidx, oc->nodemask) in constrained_alloc()
  317 /* p may not have freeable memory in nodemask */ in oom_evaluate_task()
  390 /* p may not have freeable memory in nodemask */ in dump_task()
  450 pr_info("oom-kill:constraint=%s,nodemask=%*pbl", in dump_oom_victim()
  452 nodemask_pr_args(oc->nodemask)); in dump_oom_victim()
  [all...]
H A D | page_alloc.c |
  37 #include <linux/nodemask.h>
  3419 ac->nodemask) { in unreserve_highatomic_pageblock()
  3736 ac->nodemask) { in get_page_from_freelist()
  3892 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
  3908 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); in warn_alloc_show_mem()
  3911 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
  3925 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", in warn_alloc()
  3927 nodemask_pr_args(nodemask)); in warn_alloc()
  3933 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
  3961 .nodemask in __alloc_pages_may_oom()
  4895 prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags) in prepare_alloc_pages() argument
  4959 alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct page **page_array) in alloc_pages_bulk_noprof() argument
  5113 __alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __alloc_frozen_pages_noprof() argument
  5178 __alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __alloc_pages_noprof() argument
  5190 __folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __folio_alloc_noprof() argument
  7034 alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask) in alloc_contig_pages_noprof() argument
  [all...]
H A D | hugetlb_cma.c | 30 int nid, nodemask_t *nodemask) in hugetlb_cma_alloc_folio() argument 40 for_each_node_mask(node, *nodemask) { in hugetlb_cma_alloc_folio()
|
H A D | numa_memblks.c |
  19 * Set nodes, which have memory in @mi, in *@nodemask.
  21 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, in numa_nodemask_from_meminfo() argument
  29 node_set(mi->blk[i].nid, *nodemask); in numa_nodemask_from_meminfo()
|
H A D | hugetlb.c |
  13 #include <linux/nodemask.h>
  1371 nodemask_t *nodemask; in dequeue_hugetlb_folio_vma() local
  1382 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_hugetlb_folio_vma()
  1386 nid, nodemask); in dequeue_hugetlb_folio_vma()
  1389 nodemask = NULL; in dequeue_hugetlb_folio_vma()
  1394 nid, nodemask); in dequeue_hugetlb_folio_vma()
  1477 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
  1486 folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
  1491 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
  1510 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
  1518 alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
  2315 nodemask_t *nodemask; in alloc_buddy_hugetlb_folio_with_mpol() local
  6851 nodemask_t *nodemask; in alloc_hugetlb_folio_vma() local
  [all...]
H A D | vmscan.c |
  83 nodemask_t *nodemask; member
  6279 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
  6416 sc->nodemask) { in do_try_to_free_pages()
  6530 nodemask_t *nodemask) in throttle_direct_reclaim() argument
  6568 gfp_zone(gfp_mask), nodemask) { in throttle_direct_reclaim()
  6610 gfp_t gfp_mask, nodemask_t *nodemask) in try_to_free_pages() argument
  6618 .nodemask = nodemask, in try_to_free_pages()
  6638 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) in try_to_free_pages()
|
/linux/include/linux/ |
H A D | cpuset.h |
  16 #include <linux/nodemask.h>
  83 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
  163 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
  170 current->mems_allowed = nodemask; in set_mems_allowed()
  222 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) in cpuset_nodemask_valid_mems_allowed() argument
  283 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
|
H A D | nodemask.h |
  20 * The available nodemask operations are:
  49 * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
  54 * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
  99 * nodemask_pr_args - printf args to output a nodemask
  100 * @maskp: nodemask to be printed
  102 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
  149 #define node_isset(node, nodemask) test_bit((node), (nodemask).bits) argument
  151 #define node_test_and_set(node, nodemask) \ argument
  152 __node_test_and_set((node), &(nodemask))
  235 nodes_full(nodemask) global() argument
  241 nodes_weight(nodemask) global() argument
  [all...]
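The operations this header advertises mirror the cpumask API. As a quick orientation, here is a minimal kernel-style sketch using only macros defined in nodemask.h; the wrapper function and the node numbers are hypothetical:

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical demo of the core nodemask.h operations. */
    static void nodemask_demo(void)
    {
            nodemask_t mask = NODE_MASK_NONE;       /* start with no nodes */
            int nid;

            node_set(0, mask);                      /* add node 0 */
            node_set(2, mask);                      /* add node 2 */

            if (node_isset(2, mask))                /* membership test */
                    pr_info("weight=%d\n", nodes_weight(mask));

            for_each_node_mask(nid, mask)           /* iterate set nodes */
                    pr_info("node %d is in the mask\n", nid);

            node_clear(0, mask);                    /* drop node 0 again */
    }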
H A D | mempolicy.h |
  15 #include <linux/nodemask.h>
  56 nodemask_t user_nodemask; /* nodemask passed by user */
  144 struct mempolicy **mpol, nodemask_t **nodemask);
  260 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
  263 *nodemask = NULL; in huge_node()
|
H A D | gfp.h |
  225 nodemask_t *nodemask);
  229 nodemask_t *nodemask);
  233 nodemask_t *nodemask, int nr_pages,
  437 int nid, nodemask_t *nodemask);
|
H A D | oom.h | 8 #include <linux/nodemask.h> 33 nodemask_t *nodemask; member
|
H A D | mmzone.h |
  18 #include <linux/nodemask.h>
  1706 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
  1709 * @nodes: An optional nodemask to filter the zonelist with
  1712 * within the allowed nodemask using a cursor as the starting point for the
  1718 * nodemask using a cursor within a zonelist as a starting point
  1730 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
  1733 * @nodes: An optional nodemask to filter the zonelist with
  1736 * within the allowed nodemask. The zoneref returned is a cursor that can be
  1741 * never NULL). This may happen either genuinely, or due to concurrent nodemask
  1755 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
  1765 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) global() argument
  1771 for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) global() argument
  [all...]
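These iterator macros are how the page allocator and reclaim walk candidate zones. A minimal sketch of the usual calling pattern, assuming the caller already has a zonelist, a highest allowed zone index and an optional nodemask (as in struct alloc_context); the walker function itself is hypothetical:

    #include <linux/mmzone.h>
    #include <linux/printk.h>

    /* Hypothetical walker: visit every eligible zone, skipping zones
     * above highest_zoneidx and zones on nodes outside *nodemask. */
    static void walk_eligible_zones(struct zonelist *zonelist,
                                    enum zone_type highest_zoneidx,
                                    nodemask_t *nodemask)
    {
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                            highest_zoneidx, nodemask)
                    pr_info("zone %s on node %d\n",
                            zone->name, zone_to_nid(zone));
    }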
/linux/tools/testing/selftests/kvm/x86/ |
H A D | xapic_ipi_test.c |
  254 unsigned long nodemask = 0; in do_migrations() local
  255 unsigned long nodemasks[sizeof(nodemask) * 8]; in do_migrations()
  270 r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8, in do_migrations()
  276 sizeof(nodemask) * 8, nodemask); in do_migrations()
  282 for (i = 0, bit = 1; i < sizeof(nodemask) * 8; i++, bit <<= 1) { in do_migrations()
  283 if (nodemask & bit) { in do_migrations()
  284 nodemasks[nodes] = nodemask & bit; in do_migrations()
|
/linux/tools/testing/selftests/futex/functional/ |
H A D | futex_numa_mpol.c |
  206 unsigned long nodemask; in main() local
  209 nodemask = 1 << i; in main()
  210 ret = mbind(futex_ptr, mem_size, MPOL_BIND, &nodemask, in main()
  211 sizeof(nodemask) * 8, 0); in main()
|
/linux/Documentation/admin-guide/mm/ |
H A D | numa_memory_policy.rst |
  215 is always preferred by passing an empty nodemask with this
  216 mode. If an empty nodemask is passed, the policy cannot use
  248 satisfied from the nodemask specified in the policy. If there is
  249 a memory pressure on all nodes in the nodemask, the allocation
  265 This flag specifies that the nodemask passed by
  270 change in the set of allowed nodes, the preferred nodemask (Preferred
  271 Many), preferred node (Preferred) or nodemask (Bind, Interleave) is
  284 3 is allowed from the user's nodemask, the "interleave" only
  285 occurs over that node. If no nodes from the user's nodemask are
  290 MPOL_PREFERRED policies that were created with an empty nodemask
  [all...]
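From userspace, the policy modes this document describes are selected with the set_mempolicy()/mbind() system calls, which take the nodemask as a bit array plus a bit count. A minimal sketch using the glibc/libnuma wrappers from <numaif.h>; the chosen node numbers are arbitrary examples:

    #include <numaif.h>     /* set_mempolicy(), MPOL_* (link with -lnuma) */
    #include <stdio.h>

    int main(void)
    {
            /* Restrict this task's allocations to nodes 0 and 1. */
            unsigned long nodemask = (1UL << 0) | (1UL << 1);

            if (set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8))
                    perror("set_mempolicy(MPOL_BIND)");

            /* A NULL/empty nodemask with MPOL_DEFAULT reverts to the
             * default (local) policy, as described above. */
            if (set_mempolicy(MPOL_DEFAULT, NULL, 0))
                    perror("set_mempolicy(MPOL_DEFAULT)");
            return 0;
    }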
/linux/Documentation/translations/zh_CN/core-api/ |
H A D | memory-hotplug.rst |
|
H A D | printk-formats.rst |
|
/linux/Documentation/translations/zh_CN/mm/ |
H A D | physical_memory.rst | 180 ``include/linux/nodemask.h``。
|
/linux/tools/workqueue/ |
H A D | wq_dump.py | 53 from drgn.helpers.linux.nodemask import for_each_node
|
/linux/Documentation/core-api/ |
H A D | printk-formats.rst |
  580 bitmap and its derivatives such as cpumask and nodemask
  588 For printing bitmap and its derivatives such as cpumask and nodemask,
  594 printing cpumask and nodemask.
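Concretely, %*pb prints the mask as a hex bitmap while %*pbl prints a range list (nodes 0, 2 and 3 render as "0,2-3"); nodemask_pr_args() supplies the width/pointer pair both specifiers expect. A minimal kernel-side sketch (the helper function is hypothetical):

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: print one nodemask both ways. */
    static void print_nodemask(const nodemask_t *mask)
    {
            pr_info("nodes (bitmap): %*pb\n", nodemask_pr_args(mask));
            pr_info("nodes (list):   %*pbl\n", nodemask_pr_args(mask));
    }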
|
/linux/kernel/irq/ |
H A D | manage.c |
  602 const struct cpumask *nodemask = cpumask_of_node(node); in irq_setup_affinity() local
  604 /* make sure at least one of the cpus in nodemask is online */ in irq_setup_affinity()
  605 if (cpumask_intersects(&mask, nodemask)) in irq_setup_affinity()
  606 cpumask_and(&mask, &mask, nodemask); in irq_setup_affinity()
|
/linux/drivers/tty/ |
H A D | sysrq.c | 390 .nodemask = NULL, in moom_callback()
|