Lines Matching full:nodes (mm/mempolicy.c)
15 * interleave     Allocate memory interleaved over a set of nodes,
23 * Allocate memory interleaved over a set of nodes based on
29 * bind           Only allocate memory on a specific set of nodes,
33 * the allocation to memory nodes instead
41 * preferred many Try a set of nodes first before normal fallback. This is
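The mode summary above maps directly onto the userspace memory-policy API. A minimal sketch of selecting one of these modes with set_mempolicy(2); node numbers are illustrative, and set_mempolicy() is declared in <numaif.h> from libnuma (link with -lnuma):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        /* interleave this task's future allocations over nodes 0 and 1 */
        unsigned long nodemask = (1UL << 0) | (1UL << 1);

        if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask)))
                perror("set_mempolicy");

        return 0;
}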
204 * @mask: a pointer to a nodemask representing the allowed nodes.
206 * This function iterates over all nodes in @mask and calculates the
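The kerneldoc fragment above describes a walk over a nodemask. A minimal kernel-style sketch of that pattern, using a hypothetical helper name that is not part of mempolicy.c:

#include <linux/nodemask.h>

/* Hypothetical helper: visit every node set in @mask. */
static unsigned int count_allowed_nodes(const nodemask_t *mask)
{
        unsigned int n = 0;
        int node;

        for_each_node_mask(node, *mask)  /* iterates only over set bits */
                n++;

        return n;       /* same result as nodes_weight(*mask) */
}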
250 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
251 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
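The create/rebind pointers above belong to the per-mode operations table; pieced together from the matches, its shape is roughly (hedged reconstruction):

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];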
267 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_nodemask() argument
269 if (nodes_empty(*nodes)) in mpol_new_nodemask()
271 pol->nodes = *nodes; in mpol_new_nodemask()
275 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_preferred() argument
277 if (nodes_empty(*nodes)) in mpol_new_preferred()
280 nodes_clear(pol->nodes); in mpol_new_preferred()
281 node_set(first_node(*nodes), pol->nodes); in mpol_new_preferred()
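Pieced together, the two create callbacks matched above have roughly the following shape (hedged reconstruction for readability, not a verbatim copy of the source):

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;

        /* MPOL_PREFERRED keeps only the first node of the request */
        nodes_clear(pol->nodes);
        node_set(first_node(*nodes), pol->nodes);
        return 0;
}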
287 * any, for the new policy. mpol_new() has already validated the nodes
294 const nodemask_t *nodes, struct nodemask_scratch *nsc) in mpol_set_nodemask() argument
310 VM_BUG_ON(!nodes); in mpol_set_nodemask()
313 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); in mpol_set_nodemask()
315 nodes_and(nsc->mask2, *nodes, nsc->mask1); in mpol_set_nodemask()
318 pol->w.user_nodemask = *nodes; in mpol_set_nodemask()
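The relative branch above implements MPOL_F_RELATIVE_NODES, where the user mask is interpreted relative to the cpuset's allowed nodes; otherwise the mask is simply intersected with them (MPOL_F_STATIC_NODES additionally suppresses later remapping). A hedged userspace illustration with made-up node numbers:

#include <numaif.h>

/* Bind to the first two nodes of whatever cpuset the task runs in.
 * With MPOL_F_STATIC_NODES instead, bits 0 and 1 would name physical
 * node IDs and would not be remapped on cpuset changes.
 */
static long bind_first_two_allowed_nodes(void)
{
        unsigned long mask = (1UL << 0) | (1UL << 1);

        return set_mempolicy(MPOL_BIND | MPOL_F_RELATIVE_NODES,
                             &mask, 8 * sizeof(mask));
}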
328 * initialization. You must invoke mpol_set_nodemask() to set nodes.
331 nodemask_t *nodes) in mpol_new() argument
336 if (nodes && !nodes_empty(*nodes)) in mpol_new()
340 VM_BUG_ON(!nodes); in mpol_new()
348 if (nodes_empty(*nodes)) { in mpol_new()
356 if (!nodes_empty(*nodes) || in mpol_new()
360 } else if (nodes_empty(*nodes)) in mpol_new()
382 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_default() argument
386 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_nodemask() argument
391 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
393 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
395 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, in mpol_rebind_nodemask()
396 *nodes); in mpol_rebind_nodemask()
397 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_nodemask()
401 tmp = *nodes; in mpol_rebind_nodemask()
403 pol->nodes = tmp; in mpol_rebind_nodemask()
407 const nodemask_t *nodes) in mpol_rebind_preferred() argument
409 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_preferred()
413 * mpol_rebind_policy - Migrate a policy to a different set of nodes
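The default rebind path above uses nodes_remap() to carry node positions over from the old cpuset mask to the new one. A standalone sketch with illustrative values (not kernel code):

#include <linux/nodemask.h>

/* A policy using node 1 inside a cpuset moved from {0,1} to {4,5} ends
 * up on node 5: node 1 was the second allowed node, and node 5 is the
 * second node of the new mask.
 */
static void rebind_remap_example(void)
{
        nodemask_t old_mems = NODE_MASK_NONE, new_mems = NODE_MASK_NONE;
        nodemask_t pol_nodes = NODE_MASK_NONE, tmp;

        node_set(0, old_mems);
        node_set(1, old_mems);          /* old cpuset_mems_allowed = {0,1} */
        node_set(4, new_mems);
        node_set(5, new_mems);          /* new mems_allowed = {4,5} */
        node_set(1, pol_nodes);         /* policy was using node 1 */

        nodes_remap(tmp, pol_nodes, old_mems, new_mems);        /* tmp == {5} */
}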
754 * Check page nodes, and queue pages to move, in the current vma. in queue_pages_test_walk()
779 * If pages found in a given range are not on the required set of @nodes,
792 nodemask_t *nodes, unsigned long flags, in queue_pages_range() argument
799 .nmask = nodes, in queue_pages_range()
879 nodemask_t *nodes) in do_set_mempolicy() argument
888 new = mpol_new(mode, flags, nodes); in do_set_mempolicy()
895 ret = mpol_set_nodemask(new, nodes, scratch); in do_set_mempolicy()
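The matches above show the usual two-step construction: mpol_new() validates the mode/flags pair and allocates the policy, then mpol_set_nodemask() folds the user mask into the task's cpuset context. A hedged outline of that sequence (hypothetical wrapper name; error paths trimmed and the swap of the old policy omitted):

static long set_policy_outline(unsigned short mode, unsigned short flags,
                               nodemask_t *nodes)
{
        struct mempolicy *new;
        long ret;
        NODEMASK_SCRATCH(scratch);      /* temporary nodemasks for contextualization */

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }

        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        task_unlock(current);
        /* on success the real code goes on to install 'new' as the task policy */
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}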
922 static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes) in get_policy_nodemask() argument
924 nodes_clear(*nodes); in get_policy_nodemask()
934 *nodes = pol->nodes; in get_policy_nodemask()
1017 *policy = next_node_in(current->il_prev, pol->nodes); in do_get_mempolicy()
1024 pol->nodes); in do_get_mempolicy()
1171 * This lets us pick a pair of nodes to migrate between, such that in do_migrate_pages()
1200 * However if the number of source nodes is not equal to in do_migrate_pages()
1201 * the number of destination nodes we cannot preserve in do_migrate_pages()
1221 /* dest not in remaining 'from' nodes? */ in do_migrate_pages()
1388 * targeted nodes, for the first VMA to be migrated; for later in do_mbind()
1389 * VMAs, the nodes will still be interleaved from the targeted in do_mbind()
1469 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, in get_nodes() argument
1473 nodes_clear(*nodes); in get_nodes()
1480 * When the user specified more nodes than supported just check in get_nodes()
1501 return get_bitmap(nodes_addr(*nodes), nmask, maxnode); in get_nodes()
1506 nodemask_t *nodes) in copy_nodes_to_user() argument
1526 nodes_addr(*nodes), maxnode); in copy_nodes_to_user()
1528 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; in copy_nodes_to_user()
1555 nodemask_t nodes; in kernel_mbind() local
1564 err = get_nodes(&nodes, nmask, maxnode); in kernel_mbind()
1568 return do_mbind(start, len, lmode, mode_flags, &nodes, flags); in kernel_mbind()
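For reference, the userspace side of the mbind path matched above; node number and length are illustrative, and MPOL_MF_MOVE asks the kernel to migrate pages already faulted into the range:

#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        size_t len = 4 << 20;
        unsigned long nodemask = 1UL << 0;      /* node 0 only */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        /* bind this range to node 0 and move any existing pages there */
        if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                  MPOL_MF_MOVE))
                perror("mbind");

        return 0;
}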
1650 nodemask_t nodes; in kernel_set_mempolicy() local
1658 err = get_nodes(&nodes, nmask, maxnode); in kernel_set_mempolicy()
1662 return do_set_mempolicy(lmode, mode_flags, &nodes); in kernel_set_mempolicy()
1721 /* Is the user allowed to access the target nodes? */ in kernel_migrate_pages()
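The check above sits in the migrate_pages(2) syscall path; a minimal userspace sketch (node numbers illustrative, pid 0 meaning the calling process):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        unsigned long old_nodes = 1UL << 0;     /* move pages found on node 0 ... */
        unsigned long new_nodes = 1UL << 1;     /* ... over to node 1 */

        if (migrate_pages(0, 8 * sizeof(unsigned long),
                          &old_nodes, &new_nodes) < 0)
                perror("migrate_pages");

        return 0;
}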
1774 nodemask_t nodes; in kernel_get_mempolicy() local
1781 err = do_get_mempolicy(&pval, &nodes, addr, flags); in kernel_get_mempolicy()
1790 err = copy_nodes_to_user(nmask, maxnode, &nodes); in kernel_get_mempolicy()
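And the userspace counterpart of the query path matched above; the sketch assumes the system has at most 64 possible nodes, so a single unsigned long is large enough for the returned mask:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        int mode;
        unsigned long nodemask = 0;

        /* query the calling task's policy and its nodemask */
        if (get_mempolicy(&mode, &nodemask, 8 * sizeof(nodemask), NULL, 0))
                perror("get_mempolicy");
        else
                printf("mode=%d nodemask=%#lx\n", mode, nodemask);

        return 0;
}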
1899 * if policy->nodes has movable memory only, in apply_policy_zone()
1902 * policy->nodes intersects with node_states[N_MEMORY]. in apply_policy_zone()
1904 * policy->nodes has movable memory only. in apply_policy_zone()
1906 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) in apply_policy_zone()
1921 if (!current->il_weight || !node_isset(node, policy->nodes)) { in weighted_interleave_nodes()
1922 node = next_node_in(node, policy->nodes); in weighted_interleave_nodes()
1943 nid = next_node_in(current->il_prev, policy->nodes); in interleave_nodes()
1969 return first_node(policy->nodes); in mempolicy_slab_node()
1990 &policy->nodes); in mempolicy_slab_node()
2010 memcpy(mask, &pol->nodes, sizeof(nodemask_t)); in read_once_policy_nodemask()
2056 * node in pol->nodes (starting from ilx=0), wrapping around if ilx
2057 * exceeds the number of present nodes.
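The wrap-around described above is plain modular arithmetic over the set nodes. A hypothetical helper showing the same idea (not the kernel's interleave code):

#include <linux/numa.h>
#include <linux/nodemask.h>

/* Return the (ilx % nnodes)'th set node of @mask; e.g. for mask {1,3,5}
 * and ilx = 7, nnodes = 3 and 7 % 3 = 1, so the second node (3) is chosen.
 */
static int nth_interleave_node(const nodemask_t *mask, unsigned long ilx)
{
        unsigned int nnodes = nodes_weight(*mask);
        int nid;

        if (!nnodes)
                return NUMA_NO_NODE;

        ilx %= nnodes;
        nid = first_node(*mask);
        while (ilx--)
                nid = next_node(nid, *mask);

        return nid;
}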
2077 * Return a nodemask representing a mempolicy for filtering nodes for
2088 *nid = first_node(pol->nodes); in policy_nodemask()
2091 nodemask = &pol->nodes; in policy_nodemask()
2098 cpuset_nodemask_valid_mems_allowed(&pol->nodes)) in policy_nodemask()
2099 nodemask = &pol->nodes; in policy_nodemask()
2181 *mask = mempolicy->nodes; in init_nodemask_of_mempolicy()
2203 * memory allocated from all nodes in the system.
2219 ret = nodes_intersects(mempolicy->nodes, *mask); in mempolicy_in_oom_domain()
2233 * preferred nodes but skip the direct reclaim and allow the in alloc_pages_preferred_many()
2235 * nodes in the system. in alloc_pages_preferred_many()
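The fragments above describe a two-pass allocation: try only the preferred nodes cheaply, then retry against every node. A hedged sketch of that shape; the exact gfp manipulation in mempolicy.c may differ:

static struct page *preferred_many_sketch(gfp_t gfp, unsigned int order,
                                          int nid, nodemask_t *nodemask)
{
        gfp_t preferred_gfp;
        struct page *page;

        /* pass 1: preferred nodes only, no direct reclaim, no OOM warning */
        preferred_gfp = gfp | __GFP_NOWARN;
        preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
        page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);

        /* pass 2: fall back to all nodes in the system */
        if (!page)
                page = __alloc_pages_noprof(gfp, order, nid, NULL);

        return page;
}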
2274 * node and don't fall back to other nodes, as the cost of in alloc_pages_mpol()
2410 int nodes; in alloc_pages_bulk_interleave() local
2417 nodes = nodes_weight(pol->nodes); in alloc_pages_bulk_interleave()
2418 nr_pages_per_node = nr_pages / nodes; in alloc_pages_bulk_interleave()
2419 delta = nr_pages - nodes * nr_pages_per_node; in alloc_pages_bulk_interleave()
2421 for (i = 0; i < nodes; i++) { in alloc_pages_bulk_interleave()
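For example, with nr_pages = 10 interleaved over 3 nodes the arithmetic above gives nr_pages_per_node = 3 and delta = 1, so the first node in the rotation receives 4 pages and the remaining two receive 3 each.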
2454 nodemask_t nodes; in alloc_pages_bulk_weighted_interleave() local
2464 /* read the nodes onto the stack, retry if done during rebind */ in alloc_pages_bulk_weighted_interleave()
2467 nnodes = read_once_policy_nodemask(pol, &nodes); in alloc_pages_bulk_weighted_interleave()
2477 if (weight && node_isset(node, nodes)) { in alloc_pages_bulk_weighted_interleave()
2507 for_each_node_mask(node, nodes) { in alloc_pages_bulk_weighted_interleave()
2522 resume_node = next_node_in(prev_node, nodes); in alloc_pages_bulk_weighted_interleave()
2525 node = next_node_in(prev_node, nodes); in alloc_pages_bulk_weighted_interleave()
2566 nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes, in alloc_pages_bulk_preferred_many()
2676 return !!nodes_equal(a->nodes, b->nodes); in __mpol_equal()
2823 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2825 polnid = first_node(pol->nodes); in mpol_misplaced()
2836 * policy nodemask we don't allow numa migration to nodes in mpol_misplaced()
2845 * Optimize placement among multiple nodes in mpol_misplaced()
2848 if (node_isset(thisnid, pol->nodes)) in mpol_misplaced()
2856 * If no allowed nodes, use current [!misplaced]. in mpol_misplaced()
2858 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2863 &pol->nodes); in mpol_misplaced()
3158 .nodes = nodemask_of_node(nid), in numa_policy_init()
3164 * enabled across suitably sized nodes (default is >= 16MB), or in numa_policy_init()
3227 nodemask_t nodes; in mpol_parse_str() local
3238 if (nodelist_parse(nodelist, nodes)) in mpol_parse_str()
3240 if (!nodes_subset(nodes, node_states[N_MEMORY])) in mpol_parse_str()
3243 nodes_clear(nodes); in mpol_parse_str()
3253 * we use first_node(nodes) to grab a single node, so here in mpol_parse_str()
3254 * nodelist (or nodes) cannot be empty. in mpol_parse_str()
3262 if (nodes_empty(nodes)) in mpol_parse_str()
3269 * Default to online nodes with memory if no nodelist in mpol_parse_str()
3272 nodes = node_states[N_MEMORY]; in mpol_parse_str()
3311 new = mpol_new(mode, mode_flags, &nodes); in mpol_parse_str()
3316 * Save nodes for mpol_to_str() to show the tmpfs mount options in mpol_parse_str()
3320 new->nodes = nodes; in mpol_parse_str()
3322 nodes_clear(new->nodes); in mpol_parse_str()
3323 node_set(first_node(nodes), new->nodes); in mpol_parse_str()
3329 * Save nodes for contextualization: this will be used to "clone" in mpol_parse_str()
3332 new->w.user_nodemask = nodes; in mpol_parse_str()
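The parser matched above handles the tmpfs mpol= mount option. A hedged userspace illustration of the strings it accepts (mount point and node range are made up):

#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
        /* interleave tmpfs pages over nodes 0-3; other forms include
         * "mpol=bind:0,2" and a "=static"/"=relative" flag suffix such
         * as "mpol=bind=static:0-3"
         */
        if (mount("tmpfs", "/mnt/ileave", "tmpfs", 0,
                  "size=1g,mpol=interleave:0-3"))
                perror("mount");

        return 0;
}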
3362 nodemask_t nodes = NODE_MASK_NONE; in mpol_to_str() local
3383 nodes = pol->nodes; in mpol_to_str()
3411 if (!nodes_empty(nodes)) in mpol_to_str()
3413 nodemask_pr_args(&nodes)); in mpol_to_str()