Lines Matching defs:ai

1570 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
2386 * Allocate ai which is large enough for @nr_groups groups containing
2387 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2399 struct pcpu_alloc_info *ai;
2404 base_size = ALIGN(struct_size(ai, groups, nr_groups),
2405 __alignof__(ai->groups[0].cpu_map[0]));
2406 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2411 ai = ptr;
2414 ai->groups[0].cpu_map = ptr;
2417 ai->groups[0].cpu_map[unit] = NR_CPUS;
2419 ai->nr_groups = nr_groups;
2420 ai->__ai_size = PFN_ALIGN(ai_size);
2422 return ai;
2427 * @ai: pcpu_alloc_info to free
2429 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2431 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2433 memblock_free(ai, ai->__ai_size);
2439 * @ai: allocation info to dump
2441 * Print out information about @ai using loglevel @lvl.
2444 const struct pcpu_alloc_info *ai)
2452 v = ai->nr_groups;
2461 upa = ai->alloc_size / ai->unit_size;
2466 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2467 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2469 for (group = 0; group < ai->nr_groups; group++) {
2470 const struct pcpu_group_info *gi = &ai->groups[group];
2495 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2502 * @ai contains all information necessary to initialize the first
2505 * @ai->static_size is the size of static percpu area.
2507 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2515 * @ai->dyn_size determines the number of bytes available for dynamic
2516 * allocation in the first chunk. The area between @ai->static_size +
2517 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2519 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2520 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2521 * @ai->dyn_size.
2523 * @ai->atom_size is the allocation atom size and used as alignment
2526 * @ai->alloc_size is the allocation size and always multiple of
2527 * @ai->atom_size. This is larger than @ai->atom_size if
2528 * @ai->unit_size is larger than @ai->atom_size.
2530 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2533 * groupings. If @ai->nr_groups is zero, a single group containing
2547 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2550 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2566 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2572 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2574 PCPU_SETUP_BUG_ON(!ai->static_size);
2579 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2580 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2581 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2582 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2583 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2584 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2587 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2590 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2593 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2608 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2609 const struct pcpu_group_info *gi = &ai->groups[group];
2612 group_sizes[group] = gi->nr_units * ai->unit_size;
2624 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2642 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2644 pcpu_nr_groups = ai->nr_groups;
2651 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2653 pcpu_atom_size = ai->atom_size;
2657 pcpu_stats_save_ai(ai);
2684 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2685 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2699 if (ai->reserved_size)
2701 ai->reserved_size);
2702 tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
2802 struct pcpu_alloc_info *ai;
2892 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2893 if (!ai)
2895 cpu_map = ai->groups[0].cpu_map;
2898 ai->groups[group].cpu_map = cpu_map;
2902 ai->static_size = static_size;
2903 ai->reserved_size = reserved_size;
2904 ai->dyn_size = dyn_size;
2905 ai->unit_size = alloc_size / upa;
2906 ai->atom_size = atom_size;
2907 ai->alloc_size = alloc_size;
2910 struct pcpu_group_info *gi = &ai->groups[group];
2917 gi->base_offset = unit * ai->unit_size;
2927 return ai;
3006 struct pcpu_alloc_info *ai;
3011 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3013 if (IS_ERR(ai))
3014 return PTR_ERR(ai);
3016 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3017 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3027 for (group = 0; group < ai->nr_groups; group++) {
3028 struct pcpu_group_info *gi = &ai->groups[group];
3037 ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3051 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3069 for (group = 0; group < ai->nr_groups; group++) {
3070 struct pcpu_group_info *gi = &ai->groups[group];
3073 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3076 pcpu_fc_free(ptr, ai->unit_size);
3080 memcpy(ptr, __per_cpu_start, ai->static_size);
3081 pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3086 for (group = 0; group < ai->nr_groups; group++) {
3087 ai->groups[group].base_offset = areas[group] - base;
3091 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3092 ai->dyn_size, ai->unit_size);
3094 pcpu_setup_first_chunk(ai, base);
3098 for (group = 0; group < ai->nr_groups; group++)
3101 ai->groups[group].nr_units * ai->unit_size);
3103 pcpu_free_alloc_info(ai);
3180 struct pcpu_alloc_info *ai;
3191 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3192 if (IS_ERR(ai))
3193 return PTR_ERR(ai);
3194 BUG_ON(ai->nr_groups != 1);
3195 upa = ai->alloc_size/ai->unit_size;
3197 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3198 pcpu_free_alloc_info(ai);
3202 unit_pages = ai->unit_size >> PAGE_SHIFT;
3212 unsigned int cpu = ai->groups[0].cpu_map[unit];
3230 vm.size = num_possible_cpus() * ai->unit_size;
3235 (unsigned long)vm.addr + unit * ai->unit_size;
3246 flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
3249 memcpy((void *)unit_addr, __per_cpu_start, ai->static_size);
3254 unit_pages, psize_str, ai->static_size,
3255 ai->reserved_size, ai->dyn_size);
3257 pcpu_setup_first_chunk(ai, vm.addr);
3266 pcpu_free_alloc_info(ai);
3322 struct pcpu_alloc_info *ai;
3325 ai = pcpu_alloc_alloc_info(1, 1);
3327 if (!ai || !fc)
3332 ai->dyn_size = unit_size;
3333 ai->unit_size = unit_size;
3334 ai->atom_size = unit_size;
3335 ai->alloc_size = unit_size;
3336 ai->groups[0].nr_units = 1;
3337 ai->groups[0].cpu_map[0] = 0;
3339 pcpu_setup_first_chunk(ai, fc);
3340 pcpu_free_alloc_info(ai);