Lines Matching defs:masks
938 /* We must not have empty masks here */
1897 struct cpumask ***masks;
1959 masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1960 if (!masks)
1968 masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1969 if (!masks[i])
1979 masks[i][j] = mask;
1992 rcu_assign_pointer(sched_domains_numa_masks, masks);
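The matches at 1959-1992 show how the table is built: one array of per-node cpumask pointers per NUMA distance level, published at the end with rcu_assign_pointer(). A minimal userspace sketch of that shape, with calloc() standing in for kzalloc() and a made-up alloc_numa_masks() helper (error unwinding omitted):

#include <stdlib.h>

struct cpumask { unsigned long bits[8]; };      /* stand-in for the kernel type */

static struct cpumask ***alloc_numa_masks(int nr_levels, int nr_node_ids)
{
        struct cpumask ***masks;
        int i, j;

        masks = calloc(nr_levels, sizeof(*masks));              /* one slot per level */
        if (!masks)
                return NULL;

        for (i = 0; i < nr_levels; i++) {
                masks[i] = calloc(nr_node_ids, sizeof(*masks[i]));  /* one slot per node */
                if (!masks[i])
                        return NULL;            /* real code would unwind here */

                for (j = 0; j < nr_node_ids; j++) {
                        masks[i][j] = calloc(1, sizeof(*masks[i][j]));
                        if (!masks[i][j])
                                return NULL;
                }
        }
        return masks;   /* the kernel publishes this with rcu_assign_pointer() */
}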
2034 struct cpumask ***masks;
2042 masks = sched_domains_numa_masks;
2044 if (distances || masks) {
2049 for (i = 0; i < nr_levels && masks; i++) {
2050 if (!masks[i])
2053 kfree(masks[i][j]);
2054 kfree(masks[i]);
2056 kfree(masks);
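The teardown at 2034-2056 walks the same shape in reverse. A hedged userspace sketch with a hypothetical free_numa_masks() helper; detaching the pointer from readers (which the kernel does via sched_domains_numa_masks and RCU) is not modelled:

#include <stdlib.h>

struct cpumask;         /* opaque here; free() only needs the pointer value */

static void free_numa_masks(struct cpumask ***masks, int nr_levels, int nr_node_ids)
{
        int i, j;

        if (!masks)
                return;

        for (i = 0; i < nr_levels; i++) {
                if (!masks[i])
                        continue;
                for (j = 0; j < nr_node_ids; j++)
                        free(masks[i][j]);
                free(masks[i]);
        }
        free(masks);
}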
2094 /* Set ourselves in the remote node's masks */
2124 struct cpumask ***masks;
2127 masks = rcu_dereference(sched_domains_numa_masks);
2128 if (!masks)
2131 if (!masks[i][j])
2133 cpu = cpumask_any_and_distribute(cpus, masks[i][j]);
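The loop at 2124-2133 scans the levels from nearest to farthest and stops at the first one whose mask for the starting node intersects the requested CPU set. A toy single-word analogue (find_closest() and its arguments are made up; each level's mask is collapsed to one unsigned long of CPU bits):

/* Returns any CPU present in both 'cpus' and the first overlapping level, or -1. */
static int find_closest(unsigned long cpus, const unsigned long *level_masks,
                        int nr_levels)
{
        int i;

        for (i = 0; i < nr_levels; i++) {
                unsigned long hit = cpus & level_masks[i];

                if (hit)
                        return __builtin_ctzl(hit);     /* lowest CPU in the overlap */
        }
        return -1;
}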
2147 struct cpumask ***masks;
2161 if (b == k->masks) {
2199 k.masks = rcu_dereference(sched_domains_numa_masks);
2200 if (!k.masks)
2203 hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
2204 hop = hop_masks - k.masks;
2207 cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
2208 cpumask_nth_and(cpu, cpus, k.masks[0][node]);
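The matches at 2199-2208 binary-search the per-hop masks for the hop that owns the n-th requested CPU: the masks are cumulative per level, so each hop's cpumask_weight_and() against the requested set is a non-decreasing count and bsearch() can locate the hop whose range covers the index. A self-contained userspace sketch of that idea only; hop_key, hop_range_cmp and the cum_w[] values are invented stand-ins (cum_w[h] plays the role of cpumask_weight_and(cpus, masks[h][node])), not the kernel's __cmp_key/hop_cmp:

#include <stdio.h>
#include <stdlib.h>

struct hop_key {
        int cpu;                /* index of the CPU we are looking for */
        const int *cum_w;       /* cumulative per-hop CPU counts       */
};

static int hop_range_cmp(const void *key, const void *elem)
{
        const struct hop_key *k = key;
        int hop = (int)((const int *)elem - k->cum_w);
        int lo = hop ? k->cum_w[hop - 1] : 0;

        if (k->cpu < lo)
                return -1;      /* wanted CPU sits in an earlier hop */
        if (k->cpu >= k->cum_w[hop])
                return 1;       /* wanted CPU sits in a later hop    */
        return 0;               /* this hop owns the wanted CPU      */
}

int main(void)
{
        int cum_w[] = { 4, 4, 10, 16 }; /* 4 local CPUs, none 1 hop out, 6 more 2 hops out, ... */
        struct hop_key k = { .cpu = 7, .cum_w = cum_w };
        const int *hit = bsearch(&k, cum_w, sizeof(cum_w) / sizeof(cum_w[0]),
                                 sizeof(cum_w[0]), hop_range_cmp);

        if (hit)
                printf("CPU %d is owned by hop %td\n", k.cpu, hit - cum_w);
        return 0;
}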
2228 * distances and masks are handled.
2230 * during the lifetime of the system (offline nodes are taken out of the masks).
2234 struct cpumask ***masks;
2239 masks = rcu_dereference(sched_domains_numa_masks);
2240 if (!masks)
2243 return masks[hops][node];
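The lookup at 2239-2243 simply indexes the published table. A trivial hedged sketch (numa_hop_mask() is a made-up name; the kernel's RCU read-side protection and error-pointer returns are not modelled):

struct cpumask;         /* opaque stand-in */

static struct cpumask *numa_hop_mask(struct cpumask ***masks,
                                     int nr_levels, int nr_node_ids,
                                     int hops, int node)
{
        if (!masks || hops < 0 || hops >= nr_levels || node < 0 || node >= nr_node_ids)
                return NULL;
        return masks[hops][node];       /* the kernel returns this under RCU */
}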
2381 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2712 * The masks don't intersect (don't overlap.) We should setup one