/linux-6.15/Documentation/core-api/ |
D | min_heap.rst | 4 Min Heap API 12 The Min Heap API provides a set of functions and macros for managing min-heaps 13 in the Linux kernel. A min-heap is a binary tree structure where the value of 17 This document provides a guide to the Min Heap API, detailing how to define and 36 Min-Heap Definition 39 The core data structure for representing a min-heap is defined using the 41 you to define a min-heap with a preallocated buffer or dynamically allocated 50 size_t nr; /* Number of elements in the heap */ 52 _type *data; /* Pointer to the heap data */ 58 A typical heap structure will include a counter for the number of elements [all …]
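The min_heap.rst entry above documents the generic container: a count of elements, a capacity, a data pointer, and an optional preallocated buffer. A minimal usage sketch follows; it is assembled from helpers visible elsewhere in this listing (min_heap_init(), min_heap_push(), min_heap_pop(), struct min_heap_callbacks) plus the DEFINE_MIN_HEAP() definition macro, so treat it as an approximation of the 6.15 API rather than a verbatim excerpt.

    #include <linux/kernel.h>
    #include <linux/min_heap.h>

    /* Generates "struct min_heap_int", a heap of ints. */
    DEFINE_MIN_HEAP(int, min_heap_int);

    static bool int_less(const void *lhs, const void *rhs, void *args)
    {
            return *(const int *)lhs < *(const int *)rhs;
    }

    static void int_swap(void *lhs, void *rhs, void *args)
    {
            int tmp = *(int *)lhs;

            *(int *)lhs = *(int *)rhs;
            *(int *)rhs = tmp;
    }

    static void min_heap_demo(void)
    {
            static const struct min_heap_callbacks cb = {
                    .less = int_less,
                    .swp  = int_swap,
            };
            int backing[8];
            int vals[] = { 5, 2, 7, 1 };
            struct min_heap_int heap;
            size_t i;

            min_heap_init(&heap, backing, ARRAY_SIZE(backing));
            for (i = 0; i < ARRAY_SIZE(vals); i++)
                    min_heap_push(&heap, &vals[i], &cb, NULL);

            /* Elements come back smallest-first: 1, 2, 5, 7. */
            while (heap.nr)
                    min_heap_pop(&heap, &cb, NULL);
    }

The same push/pop pattern, using the _inline variants, appears in lib/test_min_heap.c further down in this listing.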
|
/linux-6.15/drivers/gpu/drm/panthor/ |
D | panthor_heap.c | 16 * The GPU heap context is an opaque structure used by the GPU to track the 17 * heap allocations. The driver should only touch it to initialize it (zero all 24 * struct panthor_heap_chunk_header - Heap chunk header 28 * @next: Next heap chunk in the list. 39 * struct panthor_heap_chunk - Structure used to keep track of allocated heap chunks. 42 /** @node: Used to insert the heap chunk in panthor_heap::chunks. */ 45 /** @bo: Buffer object backing the heap chunk. */ 50 * struct panthor_heap - Structure used to manage tiler heap contexts. 53 /** @chunks: List containing all heap chunks allocated so far. */ 71 /** @chunk_count: Number of heap chunks currently allocated. */ [all …]
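The kernel-doc quoted above describes the driver's bookkeeping for tiler heaps: each chunk carries a small header chaining it to the next chunk for the GPU, while the CPU side keeps the chunks on a list together with a running count. The sketch below only mirrors that shape; all demo_* names and the void pointer standing in for the buffer object are illustrative, not panthor's actual types.

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_heap_chunk {
            struct list_head node;     /* linked into demo_heap::chunks */
            void *bo;                  /* stand-in for the backing buffer object */
    };

    struct demo_heap {
            struct list_head chunks;   /* all chunks allocated so far */
            u32 chunk_count;           /* number of chunks currently allocated */
    };

    static void demo_heap_add_chunk(struct demo_heap *heap,
                                    struct demo_heap_chunk *chunk)
    {
            list_add_tail(&chunk->node, &heap->chunks);
            heap->chunk_count++;
    }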
|
/linux-6.15/drivers/dma-buf/ |
D | dma-heap.c | 12 #include <linux/dma-heap.h> 19 #include <uapi/linux/dma-heap.h> 26 * struct dma_heap - represents a dmabuf heap in the system 28 * @ops: ops struct for this heap 29 * @priv: private data for this heap 30 * @heap_devt: heap device node 32 * @heap_cdev: heap char device 34 * Represents a heap of memory from which buffers can be made. 51 static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, in dma_heap_buffer_alloc() argument 66 dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags); in dma_heap_buffer_alloc() [all …]
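dma_heap_buffer_alloc() above is where the core hands a request to a specific heap: the heap's ->allocate() op produces a dma_buf, which is then turned into a file descriptor for userspace. A reduced sketch of that dispatch, as it would sit inside dma-heap.c (flag validation and error paths trimmed, parameter types abbreviated):

    static int demo_buffer_alloc(struct dma_heap *heap, size_t len,
                                 u32 fd_flags, u64 heap_flags)
    {
            struct dma_buf *dmabuf;
            int fd;

            /* Defer the actual allocation to the heap exporter. */
            dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);

            /* Wrap the buffer in a dma-buf file descriptor for the caller. */
            fd = dma_buf_fd(dmabuf, fd_flags);
            if (fd < 0)
                    dma_buf_put(dmabuf);
            return fd;
    }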
|
/linux-6.15/lib/ |
D | test_min_heap.c | 5 * Test cases for the min max heap. 27 struct min_heap_test *heap, in pop_verify_heap() argument 30 int *values = heap->data; in pop_verify_heap() 35 min_heap_pop_inline(heap, funcs, NULL); in pop_verify_heap() 36 while (heap->nr > 0) { in pop_verify_heap() 51 min_heap_pop_inline(heap, funcs, NULL); in pop_verify_heap() 60 struct min_heap_test heap = { in test_heapify_all() local 72 min_heapify_all_inline(&heap, &funcs, NULL); in test_heapify_all() 73 err = pop_verify_heap(min_heap, &heap, &funcs); in test_heapify_all() 77 heap.nr = ARRAY_SIZE(values); in test_heapify_all() [all …]
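The test above drains the heap after each operation and checks that successive minima never decrease. A condensed form of that check, using the _inline pop helper named in the snippet and the struct min_heap_test type the test defines:

    /* Returns the number of ordering violations seen while draining the heap. */
    static int drain_and_verify(struct min_heap_test *heap,
                                const struct min_heap_callbacks *funcs)
    {
            int *values = heap->data;
            int last, err = 0;

            if (!heap->nr)
                    return 0;

            last = values[0];
            min_heap_pop_inline(heap, funcs, NULL);
            while (heap->nr > 0) {
                    if (values[0] < last)
                            err++;          /* min-heap order was violated */
                    last = values[0];
                    min_heap_pop_inline(heap, funcs, NULL);
            }
            return err;
    }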
|
D | min_heap.c | 5 void __min_heap_init(min_heap_char *heap, void *data, size_t size) in __min_heap_init() argument 7 __min_heap_init_inline(heap, data, size); in __min_heap_init() 11 void *__min_heap_peek(struct min_heap_char *heap) in __min_heap_peek() argument 13 return __min_heap_peek_inline(heap); in __min_heap_peek() 17 bool __min_heap_full(min_heap_char *heap) in __min_heap_full() argument 19 return __min_heap_full_inline(heap); in __min_heap_full() 23 void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size, in __min_heap_sift_down() argument 26 __min_heap_sift_down_inline(heap, pos, elem_size, func, args); in __min_heap_sift_down() 30 void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, in __min_heap_sift_up() argument 33 __min_heap_sift_up_inline(heap, elem_size, idx, func, args); in __min_heap_sift_up() [all …]
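lib/min_heap.c contains no logic of its own: each function is an out-of-line wrapper around the corresponding _inline helper from min_heap.h, so callers that do not need the inlined fast path share a single copy of the code. The whole pattern, for one helper (the export line is assumed to follow the file's convention):

    void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
                            const struct min_heap_callbacks *func, void *args)
    {
            /* Code-size optimisation only; the real work is in the header. */
            __min_heap_sift_up_inline(heap, elem_size, idx, func, args);
    }
    EXPORT_SYMBOL(__min_heap_sift_up);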
|
/linux-6.15/include/linux/ |
D | min_heap.h | 10 * The Min Heap API provides utilities for managing min-heaps, a binary tree 21 * Data structure to hold a min-heap. 22 * @nr: Number of elements currently in the heap. 24 * @data: Pointer to the start of array holding the heap elements. 25 * @preallocated: Start of the static preallocated array holding the heap elements. 44 * @less: Partial order function for this heap. 195 * @i: the offset of the heap element whose parent is sought. Non-zero. 219 /* Initialize a min-heap. */ 221 void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size) in __min_heap_init_inline() argument 223 heap->nr = 0; in __min_heap_init_inline() [all …]
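The header above lays the heap out as a flat array and finds parents and children by arithmetic on positions (its parent helper, per the @i kernel-doc quoted above, works on byte offsets rather than element indices). The element-index form of that arithmetic, shown here only to make the implicit tree explicit, is the textbook one:

    #include <stddef.h>

    /*
     * Implicit binary tree stored in an array: element 0 is the root; for
     * any non-zero index i the parent and children sit at fixed positions.
     */
    static inline size_t heap_parent(size_t i) { return (i - 1) / 2; }
    static inline size_t heap_left(size_t i)   { return 2 * i + 1; }
    static inline size_t heap_right(size_t i)  { return 2 * i + 2; }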
|
D | dma-heap.h | 17 * struct dma_heap_ops - ops to operate on a given heap 23 struct dma_buf *(*allocate)(struct dma_heap *heap, 30 * struct dma_heap_export_info - information needed to export a new dmabuf heap 32 * @ops: ops struct for this heap 33 * @priv: heap exporter private data 35 * Information needed to export a new dmabuf heap. 43 void *dma_heap_get_drvdata(struct dma_heap *heap); 45 const char *dma_heap_get_name(struct dma_heap *heap);
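dma-heap.h is the exporter-facing contract: a provider supplies dma_heap_ops (at minimum .allocate, whose prototype is truncated above; the call site in dma-heap.c shows its arguments are the heap, a length, fd flags and heap flags), fills in a dma_heap_export_info, and registers it. The sketch below assumes the dma_heap_add() registration helper used by the in-tree heaps; the .allocate parameter types are abbreviated and should be checked against the header.

    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>
    #include <linux/err.h>

    static struct dma_buf *demo_allocate(struct dma_heap *heap, size_t len,
                                         u32 fd_flags, u64 heap_flags)
    {
            /* Allocate backing memory and export it as a dma_buf here. */
            return ERR_PTR(-ENOMEM);
    }

    static const struct dma_heap_ops demo_heap_ops = {
            .allocate = demo_allocate,
    };

    static int demo_heap_register(void *drvdata)
    {
            struct dma_heap_export_info exp_info = {
                    .name = "demo",   /* surfaces as /dev/dma_heap/demo */
                    .ops  = &demo_heap_ops,
                    .priv = drvdata,  /* fetched later via dma_heap_get_drvdata() */
            };
            struct dma_heap *heap = dma_heap_add(&exp_info);

            return PTR_ERR_OR_ZERO(heap);
    }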
|
/linux-6.15/fs/ubifs/ |
D | lprops.c | 22 * get_heap_comp_val - get the LEB properties value for heap comparisons. 39 * move_up_lpt_heap - move a new heap entry up as far as possible. 41 * @heap: LEB category heap 45 * New entries to a heap are added at the bottom and then moved up until the 50 static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, in move_up_lpt_heap() argument 57 return; /* Already top of the heap */ in move_up_lpt_heap() 59 /* Compare to parent and, if greater, move up the heap */ in move_up_lpt_heap() 63 val2 = get_heap_comp_val(heap->arr[ppos], cat); in move_up_lpt_heap() 67 heap->arr[ppos]->hpos = hpos; in move_up_lpt_heap() 68 heap->arr[hpos] = heap->arr[ppos]; in move_up_lpt_heap() [all …]
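move_up_lpt_heap() above implements the classic "add at the bottom, bubble up" step: compare the new entry with its parent and swap while it ranks higher, keeping the hpos back-pointers in sync along the way. Stripped of the LEB-property details and back-pointer maintenance, the move-up step looks like this (generic array-of-ints version, not UBIFS code):

    /* Bubble the element at index pos up an array-backed max-heap. */
    static void sift_up(int *arr, size_t pos)
    {
            while (pos > 0) {
                    size_t parent = (pos - 1) / 2;
                    int tmp;

                    if (arr[parent] >= arr[pos])
                            break;          /* heap property already holds */
                    tmp = arr[parent];      /* otherwise swap with the parent */
                    arr[parent] = arr[pos];
                    arr[pos] = tmp;
                    pos = parent;           /* and continue from the parent slot */
            }
    }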
|
D | find.c | 45 struct ubifs_lpt_heap *heap; in valuable() local 51 heap = &c->lpt_heap[cat - 1]; in valuable() 52 if (heap->cnt < heap->max_cnt) in valuable() 130 struct ubifs_lpt_heap *heap; in scan_for_dirty() local 134 /* There may be an LEB with enough dirty space on the free heap */ in scan_for_dirty() 135 heap = &c->lpt_heap[LPROPS_FREE - 1]; in scan_for_dirty() 136 for (i = 0; i < heap->cnt; i++) { in scan_for_dirty() 137 lprops = heap->arr[i]; in scan_for_dirty() 145 * A LEB may have fallen off of the bottom of the dirty heap, and ended in scan_for_dirty() 199 * dirty index heap, and it falls-back to LPT scanning if the heaps are empty [all …]
|
/linux-6.15/Documentation/ABI/testing/ |
D | sysfs-kernel-mm-cma | 6 heap name (also sometimes called CMA areas). 8 Each CMA heap subdirectory (that is, each 9 /sys/kernel/mm/cma/<cma-heap-name> directory) contains the 15 What: /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_success 21 What: /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_fail 27 What: /sys/kernel/mm/cma/<cma-heap-name>/release_pages_success 33 What: /sys/kernel/mm/cma/<cma-heap-name>/total_pages 39 What: /sys/kernel/mm/cma/<cma-heap-name>/available_pages
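Each counter listed above is a plain text file, so monitoring a CMA area needs nothing more than reading it. In the sketch below the area name "reserved" is only a placeholder for whatever <cma-heap-name> directories exist on the running system.

    #include <stdio.h>

    int main(void)
    {
            /* "reserved" is a placeholder; substitute a real CMA area name. */
            const char *path = "/sys/kernel/mm/cma/reserved/alloc_pages_success";
            unsigned long long ok = 0;
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%llu", &ok) == 1)
                    printf("successful CMA page allocations: %llu\n", ok);
            fclose(f);
            return 0;
    }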
|
/linux-6.15/drivers/dma-buf/heaps/ |
D | Kconfig | 2 bool "DMA-BUF System Heap" 5 Choose this option to enable the system dmabuf heap. The system heap 9 bool "DMA-BUF CMA Heap" 12 Choose this option to enable dma-buf CMA heap. This heap is backed
|
D | cma_heap.c | 3 * DMABUF CMA heap exporter 8 * Also utilizing parts of Andrew Davis' SRAM heap: 14 #include <linux/dma-heap.h> 27 struct dma_heap *heap; member 32 struct cma_heap *heap; member 247 struct cma_heap *cma_heap = buffer->heap; in cma_heap_dma_buf_release() 275 static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, in cma_heap_allocate() argument 280 struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); in cma_heap_allocate() 339 buffer->heap = cma_heap; in cma_heap_allocate() 343 exp_info.exp_name = dma_heap_get_name(heap); in cma_heap_allocate() [all …]
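cma_heap_allocate() above shows the exporter pattern end to end: look up the per-heap private data with dma_heap_get_drvdata(), track the backing memory in a per-buffer structure that remembers which heap it came from (needed again in the release path), then export it with an exp_name taken from dma_heap_get_name(). A compressed sketch of that shape, with the CMA specifics and error handling omitted and every demo_* name hypothetical:

    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_heap_priv { struct cma *cma; };
    struct demo_buffer   { struct demo_heap_priv *heap; size_t len; };

    /* A real exporter fills in map/unmap/mmap callbacks here. */
    static const struct dma_buf_ops demo_buf_ops;

    static struct dma_buf *demo_cma_allocate(struct dma_heap *heap, size_t len,
                                             u32 fd_flags, u64 heap_flags)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
            struct demo_heap_priv *priv = dma_heap_get_drvdata(heap);
            struct demo_buffer *buffer;

            buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
            if (!buffer)
                    return ERR_PTR(-ENOMEM);

            /* ... reserve pages from priv->cma and record them in buffer ... */
            buffer->heap = priv;            /* consulted again on release */
            buffer->len  = len;

            exp_info.exp_name = dma_heap_get_name(heap);
            exp_info.ops      = &demo_buf_ops;
            exp_info.size     = len;
            exp_info.flags    = fd_flags;
            exp_info.priv     = buffer;

            return dma_buf_export(&exp_info);
    }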
|
/linux-6.15/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | base.c | 255 nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type) in nvkm_mmu_type() argument 257 if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) { in nvkm_mmu_type() 258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type() 259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type() 268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap() 269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap() 270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap() 282 int heap; in nvkm_mmu_host() local 285 heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL); in nvkm_mmu_host() 286 nvkm_mmu_type(mmu, heap, type); in nvkm_mmu_host() [all …]
|
/linux-6.15/drivers/gpu/drm/nouveau/include/nvkm/core/ |
D | mm.h | 12 u8 heap; member 34 int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block); 36 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max, 38 int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max, 44 nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap) in nvkm_mm_heap_size() argument 49 if (node->heap == heap) in nvkm_mm_heap_size()
|
/linux-6.15/drivers/md/bcache/ |
D | movinggc.c | 197 return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0; in bucket_heap_top() 219 ca->heap.nr = 0; in bch_moving_gc() 228 if (!min_heap_full(&ca->heap)) { in bch_moving_gc() 230 min_heap_push(&ca->heap, &b, &callbacks, NULL); in bch_moving_gc() 231 } else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) { in bch_moving_gc() 235 ca->heap.data[0] = b; in bch_moving_gc() 236 min_heap_sift_down(&ca->heap, 0, &callbacks, NULL); in bch_moving_gc() 241 if (ca->heap.nr) { in bch_moving_gc() 242 b = min_heap_peek(&ca->heap)[0]; in bch_moving_gc() 243 min_heap_pop(&ca->heap, &callbacks, NULL); in bch_moving_gc() [all …]
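bch_moving_gc() above is the standard "track the K best candidates with a bounded min-heap" idiom: push while the heap still has room; once it is full, compare each new candidate against the minimum at the root and, if it wins, overwrite the root and sift it down. The same idiom in generic form, reusing the min_heap_int type and callbacks from the sketch after the min_heap.rst entry:

    /* Keep the K largest values seen so far; heap capacity must be K. */
    static void consider_candidate(struct min_heap_int *heap,
                                   const struct min_heap_callbacks *cb,
                                   int candidate)
    {
            if (!min_heap_full(heap)) {
                    min_heap_push(heap, &candidate, cb, NULL);
            } else if (candidate > *min_heap_peek(heap)) {
                    heap->data[0] = candidate;              /* evict the minimum */
                    min_heap_sift_down(heap, 0, cb, NULL);  /* restore heap order */
            }
    }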
|
D | bset.c | 60 min_heap_init(&iter.heap, NULL, MAX_BSETS); in __bch_count_data() 75 min_heap_init(&iter.heap, NULL, MAX_BSETS); in __bch_check_keys() 117 struct bkey *k = iter->heap.data->k, *next = bkey_next(k); in bch_btree_iter_next_check() 119 if (next < iter->heap.data->end && in bch_btree_iter_next_check() 410 * a heap), it converts a node in the tree - referenced by array index - to the 892 min_heap_init(&iter.heap, NULL, MAX_BSETS); in bch_btree_insert_key() 1098 return !iter->heap.nr; in btree_iter_end() 1110 BUG_ON(!min_heap_push(&iter->heap, in bch_btree_iter_push() 1123 iter->heap.size = ARRAY_SIZE(iter->heap.preallocated); in __bch_btree_iter_init() 1124 iter->heap.nr = 0; in __bch_btree_iter_init() [all …]
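The btree iterator above keeps its heap storage inside the iterator itself: the snippet sets heap.size from ARRAY_SIZE(heap.preallocated) and calls min_heap_init() with a NULL buffer, which makes the heap fall back to that embedded preallocated array. A minimal sketch of the same arrangement, assuming the MIN_HEAP_PREALLOCATED() definition macro from min_heap.h:

    /* Heap whose storage is embedded in the containing object. */
    struct demo_iter {
            MIN_HEAP_PREALLOCATED(int, demo_iter_heap, 8) heap;
    };

    static void demo_iter_init(struct demo_iter *iter)
    {
            /* A NULL backing buffer selects the preallocated array. */
            min_heap_init(&iter->heap, NULL, 8);
    }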
|
/linux-6.15/drivers/gpu/drm/nouveau/nvkm/core/ |
D | mm.c | 99 b->heap = a->heap; in region_head() 111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_head() argument 122 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_head() 123 if (this->heap != heap) in nvkm_mm_head() 175 b->heap = a->heap; in region_tail() 186 nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_tail() argument 198 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_tail() 199 if (this->heap != heap) in nvkm_mm_tail() 240 nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block) in nvkm_mm_init() argument 277 node->heap = heap; in nvkm_mm_init()
|
/linux-6.15/tools/include/nolibc/ |
D | stdlib.h | 73 struct nolibc_heap *heap; in free() local 78 heap = container_of(ptr, struct nolibc_heap, user_p); in free() 79 munmap(heap, heap->len); in free() 134 struct nolibc_heap *heap; in malloc() local 137 len = sizeof(*heap) + len; in malloc() 139 heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, in malloc() 141 if (__builtin_expect(heap == MAP_FAILED, 0)) in malloc() 144 heap->len = len; in malloc() 145 return heap->user_p; in malloc() 159 * No need to zero the heap, the MAP_ANONYMOUS in malloc() in calloc() [all …]
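nolibc's malloc()/free() above avoid a traditional heap entirely: each allocation is its own anonymous mmap() with a small header recording the mapping length, free() steps back from the user pointer to that header and munmap()s it, and calloc() relies on MAP_ANONYMOUS memory already being zeroed. A userspace sketch of the same idea (header layout and names are illustrative, not nolibc's):

    #include <stddef.h>
    #include <sys/mman.h>

    struct blob_hdr {
            size_t len;             /* total length of the mapping */
            char   user_p[];        /* memory handed back to the caller */
    };

    static void *blob_alloc(size_t len)
    {
            struct blob_hdr *h;

            len += sizeof(*h);
            h = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
            if (h == MAP_FAILED)
                    return NULL;
            h->len = len;           /* remembered for the matching munmap() */
            return h->user_p;       /* already zeroed: MAP_ANONYMOUS memory */
    }

    static void blob_free(void *ptr)
    {
            struct blob_hdr *h;

            if (!ptr)
                    return;
            /* Step back from the user pointer to the header in front of it. */
            h = (struct blob_hdr *)((char *)ptr - offsetof(struct blob_hdr, user_p));
            munmap(h, h->len);
    }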
|
/linux-6.15/arch/powerpc/include/asm/ |
D | rheap.h | 4 * Header file for the implementation of a remote heap. 49 /* Create a remote heap dynamically */ 52 /* Destroy a remote heap, created by rh_create() */ 65 /* Allocate the given size from the remote heap (with alignment) */ 69 /* Allocate the given size from the remote heap */ 83 /* Simple dump of remote heap info */
|
/linux-6.15/drivers/gpu/drm/nouveau/nvif/ |
D | mmu.c | 35 kfree(mmu->heap); in nvif_mmu_dtor() 53 mmu->heap = NULL; in nvif_mmu_ctor() 72 mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap), in nvif_mmu_ctor() 76 if (ret = -ENOMEM, !mmu->heap || !mmu->type) in nvif_mmu_ctor() 92 mmu->heap[i].size = args.size; in nvif_mmu_ctor() 112 mmu->type[i].heap = args.heap; in nvif_mmu_ctor()
|
/linux-6.15/kernel/sched/ |
D | cpudeadline.c | 110 * @cp: the cpudl max-heap context 165 * cpudl_clear - remove a CPU from the cpudl max-heap 166 * @cp: the cpudl max-heap context 204 * cpudl_set - update the cpudl max-heap 205 * @cp: the cpudl max-heap context 241 * @cp: the cpudl max-heap context 251 * @cp: the cpudl max-heap context 261 * @cp: the cpudl max-heap context 289 * @cp: the cpudl max-heap context
|
/linux-6.15/lib/zlib_deflate/ |
D | deftree.c | 85 * need for the L_CODES extra codes used during heap construction. However 289 /* Index within the heap array of least frequent node in the Huffman tree */ 293 * Remove the smallest element from the heap and recreate the heap with 294 * one less element. Updates heap and heap_len. 298 top = s->heap[SMALLEST]; \ 299 s->heap[SMALLEST] = s->heap[s->heap_len--]; \ 312 * Restore the heap property by moving down the tree starting at node k, 314 * when the heap property is re-established (each father smaller than its 323 int v = s->heap[k]; in pqdownheap() 328 smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { in pqdownheap() [all …]
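pqdownheap() above is the other half of heap maintenance: after pqremove swaps the last element into the root slot, the new root is pushed down, always toward the smaller of its two children, until the heap property holds again. A generic array version of that step (ints ordered by value, 0-based indices, without zlib's frequency/depth tie-breaking):

    /* Restore min-heap order below index k in an array of n elements. */
    static void sift_down(int *arr, size_t k, size_t n)
    {
            int v = arr[k];

            while (2 * k + 1 < n) {
                    size_t j = 2 * k + 1;           /* left child */

                    if (j + 1 < n && arr[j + 1] < arr[j])
                            j++;                    /* pick the smaller child */
                    if (v <= arr[j])
                            break;                  /* heap property restored */
                    arr[k] = arr[j];                /* move the child up ... */
                    k = j;                          /* ... and continue below */
            }
            arr[k] = v;
    }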
|
/linux-6.15/drivers/accel/amdxdna/ |
D | aie2_ctx.c | 59 struct amdxdna_gem_obj *heap = hwctx->priv->heap; in aie2_hwctx_restart() local 69 heap->mem.userptr, heap->mem.size); in aie2_hwctx_restart() 532 struct amdxdna_gem_obj *heap; in aie2_hwctx_init() local 542 heap = client->dev_heap; in aie2_hwctx_init() 543 if (!heap) { in aie2_hwctx_init() 544 XDNA_ERR(xdna, "The client dev heap object not exist"); in aie2_hwctx_init() 549 drm_gem_object_get(to_gobj(heap)); in aie2_hwctx_init() 551 priv->heap = heap; in aie2_hwctx_init() 554 ret = amdxdna_gem_pin(heap); in aie2_hwctx_init() 556 XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret); in aie2_hwctx_init() [all …]
|
/linux-6.15/Documentation/userspace-api/ |
D | dma-buf-heaps.rst | 14 A heap represents a specific allocator. The Linux kernel currently supports the 17 - The ``system`` heap allocates virtually contiguous, cacheable, buffers. 19 - The ``cma`` heap allocates physically contiguous, cacheable,
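The userspace API document above says what the heaps provide but not how to ask for a buffer. Allocation goes through a per-heap character device and the DMA_HEAP_IOCTL_ALLOC ioctl from the UAPI header; a minimal example against the system heap (error handling kept short):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
            struct dma_heap_allocation_data alloc = {
                    .len = 4096,                    /* one page */
                    .fd_flags = O_RDWR | O_CLOEXEC, /* flags for the dma-buf fd */
            };
            int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

            if (heap_fd < 0 || ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
                    perror("dma-heap alloc");
                    return 1;
            }
            printf("got dma-buf fd %u for %llu bytes\n", alloc.fd,
                   (unsigned long long)alloc.len);
            close(alloc.fd);
            close(heap_fd);
            return 0;
    }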
|
/linux-6.15/arch/x86/boot/ |
D | main.c | 22 char *HEAP = _end; variable 23 char *heap_end = _end; /* Default end of heap = no heap */ 128 /* Boot protocol 2.00 only, no heap available */ in init_heap() 145 /* End of heap check */ in main()
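The boot stub above manages its scratch "heap" as nothing more than a pair of pointers: HEAP starts at _end, heap_end is raised only when the boot protocol (2.01+) reports heap space, and allocations simply advance HEAP. The in-tree code keeps an equivalent helper in boot.h; the sketch below reproduces the idea generically (alignment handling omitted):

    #include <stddef.h>

    extern char *HEAP, *heap_end;   /* set up by init_heap() above */

    /* Bump-allocate n bytes from the boot heap. */
    static char *boot_heap_alloc(size_t n)
    {
            char *p = HEAP;

            if ((size_t)(heap_end - HEAP) < n)
                    return NULL;    /* no heap (protocol 2.00) or exhausted */
            HEAP += n;
            return p;
    }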
|