/*
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"

#define ALIGN_UP_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define ALIGN_UP(x, a)		ALIGN_UP_MASK(x, (typeof(x))(a) - 1)
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

#define PHYS_ALLOC_NR_REGIONS	256

struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

static struct spinlock lock;
static phys_addr_t base, top, align_min;

void phys_alloc_show(void)
{
	int i;

	spin_lock(&lock);
	printf("phys_alloc minimum alignment: 0x%llx\n", align_min);
	for (i = 0; i < nr_regions; ++i)
		printf("%016llx-%016llx [%s]\n",
			regions[i].base,
			regions[i].base + regions[i].size - 1,
			"USED");
	printf("%016llx-%016llx [%s]\n", base, top - 1, "FREE");
	spin_unlock(&lock);
}

void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
	spin_lock(&lock);
	base = base_addr;
	top = base + size;
	align_min = DEFAULT_MINIMUM_ALIGNMENT;
	/* Clear the allocation log in case we're re-initializing. */
	nr_regions = 0;
	spin_unlock(&lock);
}

void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	/* The alignment must be a nonzero power of two. */
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	align_min = align;
	spin_unlock(&lock);
}

/*
 * Bump-allocate @size bytes with the given alignment. When @safe is
 * true and we're running with 32-bit longs, clamp the usable top of
 * memory to 4GiB so the returned address is mappable.
 */
static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
					   phys_addr_t align, bool safe)
{
	phys_addr_t addr, size_orig = size;
	u64 top_safe;

	/* base, top and align_min are shared state; read them under the lock. */
	spin_lock(&lock);

	top_safe = top;
	if (safe && sizeof(long) == 4)
		top_safe = MIN(top, 1ULL << 32);

	align = MAX(align, align_min);

	addr = ALIGN_UP(base, align);
	size += addr - base;

	if ((top_safe - base) < size) {
		printf("%s: requested=0x%llx (align=0x%llx), "
		       "need=0x%llx, but free=0x%llx. "
		       "top=0x%llx, top_safe=0x%llx\n", __func__,
		       size_orig, align, size, top_safe - base,
		       top, top_safe);
		spin_unlock(&lock);
		return INVALID_PHYS_ADDR;
	}

	base += size;

	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
		regions[nr_regions].base = addr;
		regions[nr_regions].size = size_orig;
		++nr_regions;
	} else {
		printf("%s: WARNING: no free log entries, "
		       "can't log allocation...\n", __func__);
	}

	spin_unlock(&lock);

	return addr;
}

static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
					    phys_addr_t align, bool safe)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);

	if (addr == INVALID_PHYS_ADDR)
		return addr;

	memset(phys_to_virt(addr), 0, size);
	return addr;
}

phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_alloc_aligned_safe(size, align, false);
}

phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_zalloc_aligned_safe(size, align, false);
}

phys_addr_t phys_alloc(phys_addr_t size)
{
	return phys_alloc_aligned(size, align_min);
}

phys_addr_t phys_zalloc(phys_addr_t size)
{
	return phys_zalloc_aligned(size, align_min);
}

static void *early_malloc(size_t size)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);

	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static void *early_calloc(size_t nmemb, size_t size)
{
	phys_addr_t addr;

	/* Reject requests where nmemb * size would overflow. */
	if (nmemb && size > (size_t)-1 / nmemb)
		return NULL;

	addr = phys_zalloc_aligned_safe(nmemb * size, align_min, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

/* The early allocator never reclaims memory, so free is a no-op. */
static void early_free(void *ptr __unused)
{
}

static void *early_memalign(size_t alignment, size_t size)
{
	phys_addr_t addr;

	assert(alignment && !(alignment & (alignment - 1)));

	addr = phys_alloc_aligned_safe(size, alignment, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}
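/*
 * Hook the early allocator into the generic alloc_ops dispatch table.
 * All early_* variants pass safe=true so that, on 32-bit builds, the
 * returned addresses stay below 4GiB and remain mappable.
 */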
static struct alloc_ops early_alloc_ops = {
	.malloc = early_malloc,
	.calloc = early_calloc,
	.free = early_free,
	.memalign = early_memalign,
};

struct alloc_ops *alloc_ops = &early_alloc_ops;
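/*
 * Usage sketch: the base address and size below are hypothetical values
 * standing in for a platform's actual free-memory range.
 *
 *	phys_alloc_init(0x40000000UL, 0x10000000UL);
 *	phys_alloc_set_minimum_alignment(32);
 *
 *	void *p = alloc_ops->malloc(4096);
 *	phys_alloc_show();
 */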