Lines Matching refs:nodemask
89 #include <linux/nodemask.h>
123 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
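MPOL_MF_INVERT flips the sense of the nodemask membership test during the mbind() page-table walk, so the same walk can collect folios that fall outside the requested nodes. A minimal sketch of such a check, modeled on the walker's private queue_pages state (the qp->nmask and qp->flags fields are assumptions here, not quoted from the matches above):

/* Sketch: does this folio match the caller's node constraint?  With
 * MPOL_MF_INVERT set, folios on nodes *outside* the mask are the ones
 * that qualify for queueing/migration. */
static bool queue_folio_required(struct folio *folio, struct queue_pages *qp)
{
	int nid = folio_nid(folio);

	return node_isset(nid, *qp->nmask) == !(qp->flags & MPOL_MF_INVERT);
}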
313 * @mask: a pointer to a nodemask representing the allowed nodes.
395 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
453 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
454 * All other modes require a valid pointer to a non-empty nodemask.
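The two comment lines above (453-454) are the tail of the rule enforced when a new policy is created: only MPOL_PREFERRED without MPOL_F_STATIC_NODES/MPOL_F_RELATIVE_NODES may be handed an empty nodemask (it then degrades to local allocation); every other mode needs a non-empty mask. A hedged sketch of that rule as a standalone predicate (nodemask_valid_for_mode is a hypothetical helper; MPOL_DEFAULT/MPOL_LOCAL handling is omitted):

/* Sketch of the validity rule the comment describes. */
static bool nodemask_valid_for_mode(unsigned short mode, unsigned short flags,
				    const nodemask_t *nodes)
{
	if (!nodes || nodes_empty(*nodes))
		return mode == MPOL_PREFERRED &&
		       !(flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES));
	return true;	/* a non-empty nodemask is acceptable for all modes */
}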
526 * OOM/allocation failure due to parallel nodemask modification.
551 * Rebind each vma in mm to new nodemask.
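The "Rebind each vma in mm to new nodemask" comment (551) documents the hook that cpuset mems_allowed changes funnel through. A minimal sketch of that walk, assuming the standard VMA iterator; locking is simplified (mainline additionally write-locks each VMA before touching its policy):

/* Sketch: remap every per-VMA policy in the mm onto the new nodemask. */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}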
1032 * Return nodemask for policy for get_mempolicy() query
1373 nodemask_t *nodemask;
1378 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1379 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp,
1504 * nodemask, but one by one may be selected differently.
2119 * barrier stabilizes the nodemask locally so that it can be iterated
2132 nodemask_t nodemask;
2139 nr_nodes = read_once_policy_nodemask(pol, &nodemask);
2151 for_each_node_mask(nid, nodemask)
2156 nid = first_node(nodemask);
2163 nid = next_node_in(nid, nodemask);
2176 nodemask_t nodemask;
2181 nnodes = read_once_policy_nodemask(pol, &nodemask);
2185 nid = first_node(nodemask);
2187 nid = next_node(nid, nodemask);
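The fragments at 2119-2187 show the interleave selectors: snapshot the policy nodemask once with read_once_policy_nodemask() (the barrier comment at 2119), then walk to the chosen set bit. A sketch reconstructing the plain variant outlined at 2176-2187; the earlier fragments (2132-2163) appear to be the weighted counterpart, which additionally iterates the mask with for_each_node_mask() to consume per-node weights:

/* Sketch: map an interleave index onto one node of the policy's mask.
 * The one-shot snapshot keeps the walk safe against concurrent rebinds;
 * if the mask is emptied under us we just fall back to the local node. */
static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
{
	nodemask_t nodemask;
	unsigned int target, nnodes, i;
	int nid;

	nnodes = read_once_policy_nodemask(pol, &nodemask);
	if (!nnodes)
		return numa_node_id();

	target = ilx % nnodes;
	nid = first_node(nodemask);
	for (i = 0; i < target; i++)
		nid = next_node(nid, nodemask);
	return nid;
}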
2192 * Return a nodemask representing a mempolicy for filtering nodes for
2198 nodemask_t *nodemask = NULL;
2206 nodemask = &pol->nodes;
2211 /* Restrict to nodemask (but not on lower zones) */
2214 nodemask = &pol->nodes;
2236 return nodemask;
2246 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2251 * to the mempolicy's @nodemask for filtering the zonelist.
2254 struct mempolicy **mpol, nodemask_t **nodemask)
2261 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
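huge_node() (2246-2261) is the hugetlb-facing wrapper: it resolves the VMA's policy, hands the policy and its nodemask back to the caller, and returns the preferred node id. A sketch of the typical caller pattern, paired with the alloc_hugetlb_folio_nodemask() call seen at 1379 (hugetlb_folio_for_vma is a hypothetical wrapper, and the final bool argument, taken to mean "allow fallback off the preferred node", is an assumption):

/* Sketch: allocate a hugetlb folio for a fault while honouring the VMA's
 * mempolicy; drop the mempolicy reference once the nodemask is consumed. */
static struct folio *hugetlb_folio_for_vma(struct hstate *h,
					   struct vm_area_struct *vma,
					   unsigned long addr, gfp_t gfp)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	int nid;

	nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
	folio = alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp, false);
	mpol_cond_put(mpol);
	return folio;
}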
2269 * to indicate default policy. Otherwise, extract the policy nodemask
2270 * for 'bind' or 'interleave' policy into the argument nodemask, or
2271 * initialize the argument nodemask to contain the single node for
2279 * N.B., it is the caller's responsibility to free a returned nodemask.
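The block at 2269-2279 documents init_nodemask_of_mempolicy(): a false return means the task runs the default policy, otherwise the caller-supplied mask is filled from the policy, and a heap-allocated mask is the caller's to free. A sketch of the caller pattern that last note implies (for_each_mempolicy_node is a hypothetical helper; NODEMASK_ALLOC/NODEMASK_FREE keep large-NODES_SHIFT configurations off the stack):

/* Sketch: run fn() on the nodes the current task's mempolicy allows,
 * or on every online node when the policy is the default. */
static void for_each_mempolicy_node(void (*fn)(int nid))
{
	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL | __GFP_NORETRY);
	int nid;

	if (mask && init_nodemask_of_mempolicy(mask)) {
		for_each_node_mask(nid, *mask)
			fn(nid);
	} else {
		for_each_online_node(nid)
			fn(nid);
	}
	NODEMASK_FREE(mask);	/* caller frees, per the N.B. above */
}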
2316 * the policy nodemask. Otherwise, return true for all other policies
2341 int nid, nodemask_t *nodemask)
2354 page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask);
2374 nodemask_t *nodemask;
2377 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
2380 return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2393 * node in its nodemask, we allocate the standard way.
2397 (!nodemask || node_isset(nid, *nodemask))) {
2416 page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
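The fragments at 2341-2416 come from the MPOL_PREFERRED_MANY allocation path: the first attempt is confined to the preferred nodemask with relaxed gfp flags, and only if that fails does the allocator retry without any nodemask. A sketch of how those two passes plausibly fit together, written against the plain __alloc_pages() entry point rather than the frozen/noprof variant in the fragments:

/* Sketch: two-pass MPOL_PREFERRED_MANY allocation.  Pass 1 is restricted to
 * the preferred nodes and must neither reclaim nor warn; pass 2 may use any
 * node so the preference never turns into a hard failure. */
static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
					       int nid, nodemask_t *nodemask)
{
	gfp_t preferred_gfp = (gfp | __GFP_NOWARN) &
			      ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
	struct page *page;

	page = __alloc_pages(preferred_gfp, order, nid, nodemask);
	if (!page)
		page = __alloc_pages(gfp, order, nid, NULL);
	return page;
}

The condition quoted at 2397 guards the hugepage fast path in the same spirit: the single-node THP attempt is only made when that node is actually allowed by the policy's nodemask (comment at 2393).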
2586 /* if the nodemask has become invalid, we cannot do anything */
2704 nodemask_t *nodemask;
2723 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2724 return alloc_pages_bulk_noprof(gfp, nid, nodemask,
2954 * policy nodemask we don't allow numa migration to nodes
2955 * outside policy nodemask for now. This is done so that if we
2972 * use current page if in policy nodemask,
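The comments at 2954-2972 explain why NUMA-balancing hinting faults never migrate a page outside the policy nodemask for MPOL_BIND/MPOL_PREFERRED_MANY: a page already on an allowed node stays put, otherwise the nearest allowed node is chosen. A sketch of that decision (pick_hinting_target is a hypothetical condensation of the mpol_misplaced()-style logic; curnid is the folio's node, thisnid the faulting CPU's node):

/* Sketch: keep the folio if its node already satisfies the policy,
 * otherwise aim for the allowed node nearest the faulting CPU. */
static int pick_hinting_target(struct mempolicy *pol, int curnid, int thisnid)
{
	struct zoneref *z;

	if (node_isset(curnid, pol->nodes))
		return NUMA_NO_NODE;	/* in the policy nodemask: not misplaced */

	z = first_zones_zonelist(node_zonelist(thisnid, GFP_HIGHUSER),
				 gfp_zone(GFP_HIGHUSER), &pol->nodes);
	return zonelist_zone(z) ? zonelist_node_idx(z) : NUMA_NO_NODE;
}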
3153 goto free_scratch; /* no valid nodemask intersection */