Lines Matching +full:page +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
14 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
17 * and within each page, there is a singly-linked list of free blocks
22 * Allocation from heap involves first searching for a page with
23 * sufficient free blocks (using a next-fit-like approach) followed by
24 * a first-fit scan of the page. Deallocation inserts objects back
26 * address-ordered first fit.
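
The size segregation referred to here shows up further down in this listing as the SLOB_BREAK1/SLOB_BREAK2 checks in slob_alloc() and slob_free(). Below is a minimal userspace sketch of that dispatch, with the break points hard-coded to the 256- and 1024-byte values used by mm/slob.c and stand-in list identifiers; it is an illustration, not kernel code.

/*
 * Minimal userspace sketch of SLOB's size-class dispatch.  The list
 * identifiers are stand-ins; only the 256/1024-byte break points
 * mirror SLOB_BREAK1/SLOB_BREAK2 in mm/slob.c.
 */
#include <stdio.h>
#include <stddef.h>

#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024

enum slob_list_id { LIST_SMALL, LIST_MEDIUM, LIST_LARGE };

/* Same shape as the dispatch at the top of slob_alloc()/slob_free(). */
static enum slob_list_id pick_list(size_t size)
{
	if (size < SLOB_BREAK1)
		return LIST_SMALL;
	else if (size < SLOB_BREAK2)
		return LIST_MEDIUM;
	return LIST_LARGE;
}

int main(void)
{
	static const char * const name[] = { "small", "medium", "large" };
	const size_t sizes[] = { 32, 300, 2000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4zu bytes -> %s list\n", sizes[i],
		       name[pick_list(sizes[i])]);
	return 0;
}

Keeping small, medium and large requests on separate page lists is what limits fragmentation, per the design comment above.
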
29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
31 * alloc_pages() directly, allocating compound pages so the page order
38 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
39 * case the low-level allocator will fragment blocks to create the proper
40 * alignment. Again, objects of page-size or greater are allocated by
41 * calling alloc_pages(). As SLAB objects know their size, no separate
42 * size bookkeeping is necessary and there is essentially no allocation
43 * space overhead, and compound pages aren't needed for multi-page
47 * logic down to the page allocator, and simply doing the node accounting
55 * page flags. As a result, block allocations that can be satisfied from
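
The "4-byte header" mentioned above can be modelled in userspace: reserve minalign bytes in front of the object, store the requested size in the first 4 bytes of that slot, and step back the same distance on free. In the sketch below malloc() stands in for slob_alloc(), the minimum alignment is fixed at 8 as an assumption, and the model_* names are made up for this example.

/*
 * Userspace model of the kmalloc size header: reserve MODEL_MINALIGN
 * bytes in front of the object, store the requested size in the first
 * 4 bytes of that slot, and step back the same distance on free.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_MINALIGN 8

static void *model_kmalloc(size_t size)
{
	/* The kernel asks slob_alloc() for size + minalign bytes. */
	unsigned int *m = malloc(size + MODEL_MINALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;		/* header: requested size */
	return (char *)m + MODEL_MINALIGN;	/* object starts after it */
}

static size_t model_ksize(const void *block)
{
	const unsigned int *m =
		(const void *)((const char *)block - MODEL_MINALIGN);

	return *m;				/* read the header back */
}

static void model_kfree(void *block)
{
	free((char *)block - MODEL_MINALIGN);	/* free from the true start */
}

int main(void)
{
	void *p = model_kmalloc(100);

	if (!p)
		return 1;
	memset(p, 0, 100);
	printf("ksize = %zu\n", model_ksize(p));
	model_kfree(p);
	return 0;
}
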
78 * slob_block has a field 'units', which indicates size of block if +ve,
79 * or offset of next block if -ve (in SLOB_UNITs).
81 * Free blocks of size 1 unit simply contain the offset of the next block.
82 * Those with larger size contain their size in the first SLOB_UNIT of
108 static inline int slob_page_free(struct page *sp) in slob_page_free()
113 static void set_slob_page_free(struct page *sp, struct list_head *list) in set_slob_page_free()
115 list_add(&sp->slab_list, list); in set_slob_page_free()
119 static inline void clear_slob_page_free(struct page *sp) in clear_slob_page_free()
121 list_del(&sp->slab_list); in clear_slob_page_free()
126 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT) argument
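
SLOB_UNITS() rounds a byte count up to whole slob_t units. A quick userspace check of the arithmetic, assuming the 16-bit slobidx_t case so that SLOB_UNIT is 2 bytes:

/*
 * Userspace check of the unit arithmetic.  slobidx_t is taken to be
 * 16 bits here, so SLOB_UNIT is 2 bytes.
 */
#include <stdio.h>
#include <stdint.h>

typedef int16_t slobidx_t;
typedef struct { slobidx_t units; } slob_t;

#define SLOB_UNIT		sizeof(slob_t)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SLOB_UNITS(size)	DIV_ROUND_UP(size, SLOB_UNIT)

int main(void)
{
	/* 1 byte still occupies a whole unit; 5 bytes need 3 two-byte units. */
	printf("SLOB_UNITS(1)  = %zu\n", SLOB_UNITS((size_t)1));
	printf("SLOB_UNITS(5)  = %zu\n", SLOB_UNITS((size_t)5));
	printf("SLOB_UNITS(64) = %zu\n", SLOB_UNITS((size_t)64));
	return 0;
}
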
135 int size; member
144 * Encode the given size and next info into a free slob block s.
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
149 slobidx_t offset = next - base; in set_slob()
151 if (size > 1) { in set_slob()
152 s[0].units = size; in set_slob()
155 s[0].units = -offset; in set_slob()
159 * Return the size of a slob block.
163 if (s->units > 0) in slob_units()
164 return s->units; in slob_units()
177 next = -s[0].units; in slob_next()
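
Only fragments of the encode/decode pair appear above. The scheme as a whole can be modelled in userspace with a page-aligned buffer standing in for a slob page; this is a sketch of the logic implied by set_slob(), slob_units() and slob_next(), not the kernel code itself.

/*
 * Userspace model of the free-block encoding: a block larger than one
 * unit stores its size in s[0].units and the offset of the next free
 * block (in units from the start of the page) in s[1].units; a
 * one-unit block stores the negated offset in s[0].units.  A 4 KiB
 * aligned_alloc() buffer stands in for the slob page.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096UL

typedef int16_t slobidx_t;
typedef struct { slobidx_t units; } slob_t;

static slob_t *page_base(slob_t *s)
{
	/* The kernel masks with PAGE_MASK to find the page start. */
	return (slob_t *)((uintptr_t)s & ~(MODEL_PAGE_SIZE - 1));
}

static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slobidx_t offset = next - page_base(s);

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

static slobidx_t slob_units(slob_t *s)
{
	return s->units > 0 ? s->units : 1;
}

static slob_t *slob_next(slob_t *s)
{
	slobidx_t next = s[0].units < 0 ? -s[0].units : s[1].units;

	return page_base(s) + next;
}

int main(void)
{
	slob_t *page = aligned_alloc(MODEL_PAGE_SIZE, MODEL_PAGE_SIZE);

	if (!page)
		return 1;

	slob_t *a = page, *b = page + 10, *end = page + 20;

	set_slob(a, 10, b);	/* 10-unit free block, next at offset 10 */
	set_slob(b, 1, end);	/* 1-unit free block, next at offset 20 */

	printf("a: %d units, next at offset %td\n",
	       slob_units(a), slob_next(a) - page);
	printf("b: %d units, next at offset %td\n",
	       slob_units(b), slob_next(b) - page);
	free(page);
	return 0;
}
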
184 * Returns true if s is the last free block in its page.
193 struct page *page; in slob_new_pages() local
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
202 if (!page) in slob_new_pages()
205 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, in slob_new_pages()
207 return page_address(page); in slob_new_pages()
212 struct page *sp = virt_to_page(b); in slob_free_pages()
214 if (current->reclaim_state) in slob_free_pages()
215 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages()
218 -(PAGE_SIZE << order)); in slob_free_pages()
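
slob_new_pages() charges PAGE_SIZE << order bytes to the node's NR_SLAB_UNRECLAIMABLE_B counter and slob_free_pages() subtracts the same amount. The sketch below models that order-to-bytes arithmetic, with model_get_order() as a stand-in for the kernel's get_order() and a 4 KiB page size assumed.

/*
 * Userspace model of the order <-> byte arithmetic behind the
 * NR_SLAB_UNRECLAIMABLE_B accounting above.
 */
#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE 4096UL

/* Smallest order such that MODEL_PAGE_SIZE << order covers size. */
static unsigned int model_get_order(size_t size)
{
	unsigned int order = 0;

	while ((MODEL_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	const size_t sizes[] = { 4096, 5000, 16384, 70000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int order = model_get_order(sizes[i]);

		printf("%6zu bytes -> order %u, %lu bytes charged\n",
		       sizes[i], order, MODEL_PAGE_SIZE << order);
	}
	return 0;
}
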
223 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
224 * @sp: Page to look in.
225 * @size: Size of the allocation.
230 * Tries to find a chunk of memory at least @size bytes big within @sp.
233 * allocation fills up @sp then the page is removed from the
237 static void *slob_page_alloc(struct page *sp, size_t size, int align, in slob_page_alloc() argument
241 int delta = 0, units = SLOB_UNITS(size); in slob_page_alloc()
244 for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) { in slob_page_alloc()
251 * allocated block with its size, so that the block itself is in slob_page_alloc()
257 - align_offset); in slob_page_alloc()
258 delta = aligned - cur; in slob_page_alloc()
265 set_slob(aligned, avail - delta, next); in slob_page_alloc()
277 sp->freelist = next; in slob_page_alloc()
282 sp->freelist = cur + units; in slob_page_alloc()
283 set_slob(cur + units, avail - units, next); in slob_page_alloc()
286 sp->units -= units; in slob_page_alloc()
287 if (!sp->units) { in slob_page_alloc()
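
The subtle part of slob_page_alloc() is that it aligns the address align_offset bytes into the block rather than the block start, so the pointer the kmalloc() caller eventually sees (past the size header) lands on the requested boundary. A standalone model of that arithmetic, working in byte addresses for clarity (the kernel does the same computation in slob_t units); the concrete numbers are invented.

/*
 * Model of the alignment arithmetic in slob_page_alloc(): align the
 * address 'align_offset' bytes into the block, not the block start.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uintptr_t cur = 0x1008;		/* current free block (hypothetical) */
	uintptr_t align = 64;		/* requested alignment */
	uintptr_t align_offset = 8;	/* size header prepended by kmalloc() */

	uintptr_t aligned = ALIGN(cur + align_offset, align) - align_offset;
	uintptr_t delta = aligned - cur; /* bytes skipped at the front */

	printf("block begins at 0x%lx\n", (unsigned long)aligned);
	printf("caller sees     0x%lx (multiple of %lu)\n",
	       (unsigned long)(aligned + align_offset), (unsigned long)align);
	printf("%lu leading bytes stay on the free list\n",
	       (unsigned long)delta);
	return 0;
}
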
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
304 struct page *sp; in slob_alloc()
310 if (size < SLOB_BREAK1) in slob_alloc()
312 else if (size < SLOB_BREAK2) in slob_alloc()
318 /* Iterate through each partially free page, try to find room */ in slob_alloc()
324 * page with a matching node id in the freelist. in slob_alloc()
329 /* Enough room on this page? */ in slob_alloc()
330 if (sp->units < SLOB_UNITS(size)) in slob_alloc()
333 b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list); in slob_alloc()
340 * did not fragment the page anyway so optimisation is in slob_alloc()
349 if (!list_is_first(&sp->slab_list, slob_list)) in slob_alloc()
350 list_rotate_to_front(&sp->slab_list, slob_list); in slob_alloc()
356 /* Not enough space: must allocate a new page */ in slob_alloc()
365 sp->units = SLOB_UNITS(PAGE_SIZE); in slob_alloc()
366 sp->freelist = b; in slob_alloc()
367 INIT_LIST_HEAD(&sp->slab_list); in slob_alloc()
370 b = slob_page_alloc(sp, size, align, align_offset, &_unused); in slob_alloc()
375 memset(b, 0, size); in slob_alloc()
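
The "next-fit-like" page search comes from list_rotate_to_front(): after a successful allocation the page just used is rotated to the head of its list, so the next scan starts there. Below is only a toy model of that idea, using an array of per-page free-unit counts and a remembered cursor instead of the kernel's list rotation; the array contents are invented.

/*
 * Toy model of the next-fit page scan: remember where the last
 * allocation succeeded and start the next search there.
 */
#include <stdio.h>

#define NPAGES 4

static int free_units[NPAGES] = { 2, 50, 3, 40 };	/* per-page free space */
static int start;					/* next-fit cursor */

static int alloc_units(int need)
{
	for (int i = 0; i < NPAGES; i++) {
		int page = (start + i) % NPAGES;

		if (free_units[page] >= need) {
			free_units[page] -= need;
			start = page;	/* next search begins here */
			return page;
		}
	}
	return -1;	/* no page fits; the caller must grab a new page */
}

int main(void)
{
	printf("10 units -> page %d\n", alloc_units(10));
	printf("10 units -> page %d\n", alloc_units(10));
	printf("35 units -> page %d\n", alloc_units(35));
	return 0;
}
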
382 static void slob_free(void *block, int size) in slob_free() argument
384 struct page *sp; in slob_free()
392 BUG_ON(!size); in slob_free()
395 units = SLOB_UNITS(size); in slob_free()
399 if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) { in slob_free()
400 /* Go directly to page allocator. Do not pass slob allocator */ in slob_free()
411 /* This slob page is about to become partially free. Easy! */ in slob_free()
412 sp->units = units; in slob_free()
413 sp->freelist = b; in slob_free()
417 if (size < SLOB_BREAK1) in slob_free()
419 else if (size < SLOB_BREAK2) in slob_free()
428 * Otherwise the page is already partially free, so find reinsertion in slob_free()
431 sp->units += units; in slob_free()
433 if (b < (slob_t *)sp->freelist) { in slob_free()
434 if (b + units == sp->freelist) { in slob_free()
435 units += slob_units(sp->freelist); in slob_free()
436 sp->freelist = slob_next(sp->freelist); in slob_free()
438 set_slob(b, units, sp->freelist); in slob_free()
439 sp->freelist = b; in slob_free()
441 prev = sp->freelist; in slob_free()
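
The fragment above covers the case where the freed block sorts before the current free-list head: it either coalesces with that head (when the two are contiguous) or simply becomes the new head pointing at it. A userspace model of just that check, using explicit (start, units) pairs instead of the in-band encoding:

/*
 * Model of the head-insertion case in the free path: the freed block
 * sits below the old free-list head and may absorb it.
 */
#include <stdio.h>
#include <stdbool.h>

struct blk { int start, units; };

/* Returns the new list head after freeing 'b' below the old head. */
static struct blk insert_before_head(struct blk b, struct blk head,
				     bool *merged)
{
	*merged = (b.start + b.units == head.start);
	if (*merged)
		b.units += head.units;	/* coalesce with the old head */
	/* else: b simply points at the old head, as set_slob() does */
	return b;
}

int main(void)
{
	struct blk head = { .start = 40, .units = 8 };
	struct blk freed = { .start = 30, .units = 10 };	/* adjacent */
	bool merged;

	struct blk nh = insert_before_head(freed, head, &merged);

	printf("new head: start %d, %d units, merged=%d\n",
	       nh.start, nh.units, merged);
	return 0;
}
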
469 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
480 if (size < PAGE_SIZE - minalign) { in __do_kmalloc_node()
487 if (is_power_of_2(size)) in __do_kmalloc_node()
488 align = max(minalign, (int) size); in __do_kmalloc_node()
490 if (!size) in __do_kmalloc_node()
493 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
497 *m = size; in __do_kmalloc_node()
501 size, size + minalign, gfp, node); in __do_kmalloc_node()
503 unsigned int order = get_order(size); in __do_kmalloc_node()
510 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
513 kmemleak_alloc(ret, size, 1, gfp); in __do_kmalloc_node()
517 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc() argument
519 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); in __kmalloc()
523 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) in __kmalloc_track_caller() argument
525 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); in __kmalloc_track_caller()
530 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller() argument
533 return __do_kmalloc_node(size, gfp, node, caller); in __kmalloc_node_track_caller()
540 struct page *sp; in kfree()
551 unsigned int *m = (unsigned int *)(block - align); in kfree()
556 -(PAGE_SIZE << order)); in kfree()
566 struct page *sp; in __ksize()
579 m = (unsigned int *)(block - align); in __ksize()
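
Besides the minalign header, __do_kmalloc_node() promotes power-of-two sizes to their own alignment, which is what guarantees that kmalloc() of a power of two returns a naturally aligned pointer. A small model of that alignment choice, with the architecture minimum fixed at 8 as an assumption and illustrative helper names:

/*
 * Model of the alignment choice in the kmalloc path: power-of-two
 * sizes get their own alignment, everything else the minimum.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define MODEL_MINALIGN 8

static bool is_power_of_2(size_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static size_t kmalloc_align(size_t size)
{
	size_t align = MODEL_MINALIGN;

	if (is_power_of_2(size) && size > align)
		align = size;
	return align;
}

int main(void)
{
	const size_t sizes[] = { 24, 64, 100, 512 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc(%3zu): align %zu, block of %zu bytes\n",
		       sizes[i], kmalloc_align(sizes[i]),
		       sizes[i] + MODEL_MINALIGN);
	return 0;
}
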
588 c->size += sizeof(struct slob_rcu); in __kmem_cache_create()
590 c->flags = flags; in __kmem_cache_create()
603 if (c->size < PAGE_SIZE) { in slob_alloc_node()
604 b = slob_alloc(c->size, flags, c->align, node, 0); in slob_alloc_node()
605 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
606 SLOB_UNITS(c->size) * SLOB_UNIT, in slob_alloc_node()
609 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
610 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, in slob_alloc_node()
611 PAGE_SIZE << get_order(c->size), in slob_alloc_node()
615 if (b && c->ctor) { in slob_alloc_node()
617 c->ctor(b); in slob_alloc_node()
620 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); in slob_alloc_node()
631 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node() argument
633 return __do_kmalloc_node(size, gfp, node, _RET_IP_); in __kmalloc_node()
644 static void __kmem_cache_free(void *b, int size) in __kmem_cache_free() argument
646 if (size < PAGE_SIZE) in __kmem_cache_free()
647 slob_free(b, size); in __kmem_cache_free()
649 slob_free_pages(b, get_order(size)); in __kmem_cache_free()
655 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); in kmem_rcu_free()
657 __kmem_cache_free(b, slob_rcu->size); in kmem_rcu_free()
662 kmemleak_free_recursive(b, c->flags); in kmem_cache_free()
663 if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) { in kmem_cache_free()
665 slob_rcu = b + (c->size - sizeof(struct slob_rcu)); in kmem_cache_free()
666 slob_rcu->size = c->size; in kmem_cache_free()
667 call_rcu(&slob_rcu->head, kmem_rcu_free); in kmem_cache_free()
669 __kmem_cache_free(b, c->size); in kmem_cache_free()
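
For SLAB_TYPESAFE_BY_RCU caches the object size is padded by sizeof(struct slob_rcu) at cache creation, kmem_cache_free() writes that trailer at the end of the object, and the RCU callback walks back from the trailer to the object start before the real free. The sketch below models only the address arithmetic; call_rcu() is replaced by a direct call and the rcu_head is omitted.

/*
 * Model of the SLAB_TYPESAFE_BY_RCU bookkeeping: pad the object,
 * write a trailer at its end, and recover the object start from the
 * trailer alone in the "callback".
 */
#include <stdio.h>
#include <stdlib.h>

struct model_rcu {
	int size;	/* padded object size, as written by the free path */
};

static void model_rcu_callback(struct model_rcu *rcu)
{
	/* Mirror of kmem_rcu_free(): step back from the trailer to the
	 * start of the object, then free it with its full size. */
	void *b = (char *)rcu - (rcu->size - sizeof(struct model_rcu));

	printf("freeing object at %p (%d bytes)\n", b, rcu->size);
	free(b);
}

int main(void)
{
	size_t padded = 64 + sizeof(struct model_rcu);	/* done at cache creation */
	char *obj = malloc(padded);

	if (!obj)
		return 1;

	/* Mirror of kmem_cache_free(): place the trailer at the object's end. */
	struct model_rcu *rcu =
		(struct model_rcu *)(obj + padded - sizeof(struct model_rcu));
	rcu->size = (int)padded;

	model_rcu_callback(rcu);	/* call_rcu() stand-in */
	return 0;
}
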
676 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
678 __kmem_cache_free_bulk(s, size, p); in kmem_cache_free_bulk()
682 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
685 return __kmem_cache_alloc_bulk(s, flags, size, p); in kmem_cache_alloc_bulk()
706 .size = sizeof(struct kmem_cache),