Searched full:order (Results 1 – 25 of 6095) sorted by relevance

/linux-6.8/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
25 * Order 2 1 0
28 * __find_buddy_nocheck(pool, page 0, order 0) => page 1
29 * __find_buddy_nocheck(pool, page 0, order 1) => page 2
30 * __find_buddy_nocheck(pool, page 1, order 0) => page 0
31 * __find_buddy_nocheck(pool, page 2, order 0) => page 3
35 unsigned short order) in __find_buddy_nocheck() argument
39 addr ^= (PAGE_SIZE << order); in __find_buddy_nocheck()
54 unsigned short order) in __find_buddy_avail() argument
56 struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail()
58 if (!buddy || buddy->order != order || buddy->refcount) in __find_buddy_avail()
[all …]
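The excerpt above reduces to a single XOR: at a given order, a block and its buddy differ in exactly one address bit, so flipping that bit switches between the two halves of the parent block. A minimal userspace sketch (buddy_addr() is a hypothetical stand-in for __find_buddy_nocheck(), with the pool range checks dropped):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Flip the one bit that separates a block from its buddy. */
static uintptr_t buddy_addr(uintptr_t addr, unsigned int order)
{
        return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
        assert(buddy_addr(0 * PAGE_SIZE, 0) == 1 * PAGE_SIZE); /* page 0 <-> page 1 */
        assert(buddy_addr(0 * PAGE_SIZE, 1) == 2 * PAGE_SIZE); /* pair {0,1} <-> {2,3} */
        assert(buddy_addr(1 * PAGE_SIZE, 0) == 0 * PAGE_SIZE);
        assert(buddy_addr(2 * PAGE_SIZE, 0) == 3 * PAGE_SIZE);
        return 0;
}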
/linux-6.8/include/trace/events/
compaction.h
168 int order,
172 TP_ARGS(order, gfp_mask, prio),
175 __field(int, order)
181 __entry->order = order;
186 TP_printk("order=%d gfp_mask=%s priority=%d",
187 __entry->order,
195 int order,
198 TP_ARGS(zone, order, ret),
203 __field(int, order)
210 __entry->order = order;
[all …]
vmscan.h
68 TP_PROTO(int nid, int zid, int order),
70 TP_ARGS(nid, zid, order),
75 __field( int, order )
81 __entry->order = order;
84 TP_printk("nid=%d order=%d",
86 __entry->order)
91 TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
93 TP_ARGS(nid, zid, order, gfp_flags),
98 __field( int, order )
105 __entry->order = order;
[all …]
kmem.h
138 TP_PROTO(struct page *page, unsigned int order),
140 TP_ARGS(page, order),
144 __field( unsigned int, order )
149 __entry->order = order;
152 TP_printk("page=%p pfn=0x%lx order=%d",
155 __entry->order)
172 TP_printk("page=%p pfn=0x%lx order=0",
179 TP_PROTO(struct page *page, unsigned int order,
182 TP_ARGS(page, order, gfp_flags, migratetype),
186 __field( unsigned int, order )
[all …]
oom.h
35 int order,
42 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
47 __field( int, order)
58 __entry->order = order;
66 …TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loop…
68 __entry->order,
157 TP_PROTO(int order,
164 TP_ARGS(order, priority, result, retries, max_retries, ret),
167 __field( int, order)
176 __entry->order = order;
[all …]
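The four trace headers above (compaction.h, vmscan.h, kmem.h, oom.h) all follow the same TRACE_EVENT recipe for recording an allocation order: declare the prototype, reserve ring-buffer fields, copy the arguments at the trace point, then format them for output. A hedged sketch of that shared pattern (order_event is a hypothetical event name, not one defined by these headers):

TRACE_EVENT(order_event,

        TP_PROTO(int order, gfp_t gfp_flags),

        TP_ARGS(order, gfp_flags),

        TP_STRUCT__entry(
                __field(int,    order)
                __field(gfp_t,  gfp_flags)
        ),

        TP_fast_assign(
                __entry->order          = order;
                __entry->gfp_flags      = gfp_flags;
        ),

        TP_printk("order=%d gfp_flags=%s",
                  __entry->order,
                  show_gfp_flags(__entry->gfp_flags))
);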
/linux-6.8/scripts/atomic/
gen-atomic-fallback.sh
8 #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
16 local order="$1"; shift
28 #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
35 local order="$1"; shift
37 local tmpl_order=${order#_}
39 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
42 #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
49 local order="$1"; shift
51 local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
52 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
[all …]
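The shell above only stitches ordering templates together; the interesting part is the C it emits. A sketch of the shape of a generated _acquire fallback, assuming the architecture supplies only the _relaxed variant (names follow the kernel's raw_atomic_* convention):

static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
        /* Do the relaxed op, then upgrade its ordering with a fence. */
        int ret = raw_atomic_fetch_add_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
}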
gen-atomic-instrumented.sh
37 local order="$1"; shift
39 if [ "${order}" = "_release" ]; then
41 elif [ -z "${order}" ] && ! meta_in "$meta" "slv"; then
52 #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
59 local order="$1"; shift
63 local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
67 local checks="$(gen_params_checks "${meta}" "${order}" "$@")"
71 gen_kerneldoc "" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
88 local order="$1"; shift
92 case "$order" in
[all …]
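Here the generated output is a thin wrapper: sanitizer instrumentation (and, for fully ordered variants where ${order} is empty, a KCSAN barrier) placed in front of the raw atomic. A sketch of one emitted wrapper:

static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
        kcsan_mb();                                   /* fully ordered variant */
        instrument_atomic_read_write(v, sizeof(*v));  /* KASAN/KCSAN check */
        return raw_atomic_add_return(i, v);
}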
/linux-6.8/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_buddy.c
29 /* Allocating max_order bitmaps, one for each order */ in mlx5dr_buddy_init()
39 /* In the beginning, we have only one order that is available for in mlx5dr_buddy_init()
75 unsigned int *order) in dr_buddy_find_free_seg() argument
88 "ICM Buddy: failed finding free mem for order %d\n", in dr_buddy_find_free_seg()
99 *order = order_iter; in dr_buddy_find_free_seg()
106 * @order: Order of the buddy to update.
110 * It uses the buddy system's data structures to find the first free
111 * area, scanning from the requested order up to the maximum order
120 unsigned int order, in mlx5dr_buddy_alloc_mem() argument
126 err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5dr_buddy_alloc_mem()
[all …]
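The search strategy in dr_buddy_find_free_seg() is the classic buddy lookup: scan the per-order free bitmaps upward from the requested order and take the first set bit, letting the caller split a larger segment back down. A hedged sketch under assumed field names (struct buddy_sketch and its members are illustrative, not the mlx5dr types):

struct buddy_sketch {
        unsigned int max_order;
        unsigned long **bits;   /* one free bitmap per order */
        unsigned int *nbits;    /* size of each bitmap, in bits */
        unsigned int *num_free; /* free segments at each order */
};

static int find_free_seg_sketch(struct buddy_sketch *b, unsigned int order,
                                unsigned int *seg, unsigned int *order_out)
{
        unsigned int o;

        for (o = order; o <= b->max_order; o++) {
                if (!b->num_free[o])
                        continue;
                *seg = find_first_bit(b->bits[o], b->nbits[o]);
                *order_out = o;
                return 0;
        }
        return -ENOMEM; /* no free segment at any usable order */
}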
/linux-6.8/mm/
page_alloc.c
230 static void __free_pages_ok(struct page *page, unsigned int order,
304 static bool page_contains_unaccepted(struct page *page, unsigned int order);
305 static void accept_page(struct page *page, unsigned int order);
306 static bool try_to_accept_memory(struct zone *zone, unsigned int order);
332 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
334 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
521 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
524 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
525 VM_BUG_ON(order != pageblock_order); in order_to_pindex()
529 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
[all …]
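In the common case (order <= PAGE_ALLOC_COSTLY_ORDER) the mapping order_to_pindex() computes is row-major over the per-cpu free lists: MIGRATE_PCPTYPES lists per order. A sketch of that case, with the THP special case visible in the excerpt omitted:

static inline unsigned int pindex_sketch(int migratetype, int order)
{
        /* MIGRATE_PCPTYPES lists per order, indexed row-major. */
        return (MIGRATE_PCPTYPES * order) + migratetype;
}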
compaction.c
53 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
57 * Page order with-respect-to which proactive compaction
87 unsigned int i, order, nr_pages; in split_map_pages() local
94 order = page_private(page); in split_map_pages()
95 nr_pages = 1 << order; in split_map_pages()
97 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
98 if (order) in split_map_pages()
99 split_page(page, order); in split_map_pages()
153 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
[all …]
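The two block macros round a pfn to the enclosing order-aligned block. A self-contained restatement with worked values (round_down/ALIGN are re-derived here for power-of-two sizes):

#include <assert.h>

#define round_down(x, y)                ((x) & ~((y) - 1))
#define ALIGN(x, a)                     (((x) + (a) - 1) & ~((a) - 1))
#define block_start_pfn(pfn, order)     round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)       ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
        /* With a pageblock order of 9 (512-page blocks): */
        assert(block_start_pfn(0x12345UL, 9) == 0x12200UL);
        assert(block_end_pfn(0x12345UL, 9) == 0x12400UL);
        /* The end of one block is the start of the next. */
        assert(block_start_pfn(0x12400UL, 9) == 0x12400UL);
        return 0;
}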
/linux-6.8/drivers/gpu/drm/ttm/
ttm_pool.c
54 * @vaddr: original vaddr return for the mapping and order in the lower bits
79 /* Allocate pages of size 1 << order with the given gfp_flags */
81 unsigned int order) in ttm_pool_alloc_page() argument
88 /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
92 if (order) in ttm_pool_alloc_page()
97 p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
99 p->private = order; in ttm_pool_alloc_page()
107 if (order) in ttm_pool_alloc_page()
110 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
123 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
[all …]
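The last line of the excerpt packs two values into one word: dma_alloc_attrs() returns a page-aligned vaddr, so its low PAGE_SHIFT bits are zero and can carry the allocation order. A sketch of the pack/unpack pair (the helper names are hypothetical; the pool code itself unpacks with dma->vaddr & PAGE_MASK):

static unsigned long pack_vaddr(void *vaddr, unsigned int order)
{
        /* vaddr is page aligned, so the low bits are free for the tag. */
        return (unsigned long)vaddr | order;
}

static void *unpack_vaddr(unsigned long packed)
{
        return (void *)(packed & PAGE_MASK);
}

static unsigned int unpack_order(unsigned long packed)
{
        return packed & ~PAGE_MASK;
}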
/linux-6.8/tools/testing/radix-tree/
multiorder.c
3 * multiorder.c: Multi-order radix tree entry testing
16 unsigned order) in item_insert_order() argument
18 XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
19 struct item *item = item_create(index, order); in item_insert_order()
42 int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local
47 err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration()
53 if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration()
58 int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration()
60 unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration()
66 assert(item->order == order[i]); in multiorder_iteration()
[all …]
/linux-6.8/arch/arm/lib/
lib1funcs.S
106 .macro ARM_DIV2_ORDER divisor, order argument
110 clz \order, \divisor
111 rsb \order, \order, #31
117 movhs \order, #16
118 movlo \order, #0
122 addhs \order, \order, #8
126 addhs \order, \order, #4
129 addhi \order, \order, #3
130 addls \order, \order, \divisor, lsr #1
137 .macro ARM_MOD_BODY dividend, divisor, order, spare
[all …]
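The clz path of ARM_DIV2_ORDER computes the shift order of a power-of-two divisor as 31 - clz(divisor); the movhs/addhs ladder below it is the same binary search for cores without clz. A C restatement:

static inline unsigned int div2_order(unsigned int divisor)
{
        /* Bit index of the divisor's single set bit. */
        return 31 - __builtin_clz(divisor);
}

/* div2_order(1) == 0, div2_order(8) == 3, div2_order(1u << 16) == 16 */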
/linux-6.8/mm/kmsan/
init.c
105 * by their order: when kmsan_memblock_free_pages() is called for the first
106 * time with a certain order, it is reserved as a shadow block, for the second
109 * after which held_back[order] can be used again.
114 bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument
118 if (!held_back[order].shadow) { in kmsan_memblock_free_pages()
119 held_back[order].shadow = page; in kmsan_memblock_free_pages()
122 if (!held_back[order].origin) { in kmsan_memblock_free_pages()
123 held_back[order].origin = page; in kmsan_memblock_free_pages()
126 shadow = held_back[order].shadow; in kmsan_memblock_free_pages()
127 origin = held_back[order].origin; in kmsan_memblock_free_pages()
[all …]
/linux-6.8/lib/
test_xarray.c
72 unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
177 unsigned int order; in check_xa_mark_1() local
207 for (order = 2; order < max_order; order++) { in check_xa_mark_1()
208 unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
209 unsigned long next = base + (1UL << order); in check_xa_mark_1()
217 xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
328 unsigned int order; in check_xa_shrink() local
353 for (order = 0; order < max_order; order++) { in check_xa_shrink()
354 unsigned long max = (1UL << order) - 1; in check_xa_shrink()
[all …]
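xa_store_order() in the test wraps the XA_STATE_ORDER idiom: one order-N entry covers 2^N consecutive indices, and loading any index in that range returns the same entry. A simplified sketch of the store path (the test's retry loop via xas_nomem() is dropped):

static void store_order_sketch(struct xarray *xa, unsigned long index,
                               unsigned int order, void *entry)
{
        XA_STATE_ORDER(xas, xa, index, order); /* cursor spanning 2^order slots */

        xas_lock(&xas);
        xas_store(&xas, entry);
        xas_unlock(&xas);
}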
/linux-6.8/drivers/gpu/drm/tests/
drm_buddy_test.c
19 static inline u64 get_size(int order, u64 chunk_size) in get_size() argument
21 return (1 << order) * chunk_size; in get_size()
28 unsigned int i, count, *order; in drm_test_buddy_alloc_range_bias() local
43 order = drm_random_order(count, &prng); in drm_test_buddy_alloc_range_bias()
44 KUNIT_EXPECT_TRUE(test, order); in drm_test_buddy_alloc_range_bias()
48 * in some random order allocate within each bias, using various in drm_test_buddy_alloc_range_bias()
57 bias_start = order[i] * bias_size; in drm_test_buddy_alloc_range_bias()
162 kfree(order); in drm_test_buddy_alloc_range_bias()
319 int order, top; in drm_test_buddy_alloc_pathological() local
327 * order within. This should leave the mm with exactly one in drm_test_buddy_alloc_pathological()
[all …]
/linux-6.8/kernel/bpf/
cgroup_iter.c
13 * 1. Walk the descendants of a cgroup in pre-order.
14 * 2. Walk the descendants of a cgroup in post-order.
18 * For walking descendants, cgroup_iter can walk in either pre-order or
19 * post-order. For walking ancestors, the iter walks up from a cgroup to
40 * EOPNOTSUPP. In order to work around, the user may have to update their
54 int order; member
77 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start()
79 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start()
110 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_next()
112 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_next()
[all …]
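The order field selects which traversal primitive advances the cursor. A condensed sketch of the dispatch (struct cgroup_iter_priv mirrors the iterator's private state; the ancestors walk ascends through css->parent):

static struct cgroup_subsys_state *
iter_next_sketch(struct cgroup_iter_priv *p,
                 struct cgroup_subsys_state *curr,
                 struct cgroup_subsys_state *root)
{
        if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)
                return css_next_descendant_pre(curr, root);
        else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST)
                return css_next_descendant_post(curr, root);
        else /* BPF_CGROUP_ITER_ANCESTORS_UP */
                return curr->parent;
}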
/linux-6.8/drivers/media/pci/cx18/
cx18-mailbox.c
231 static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument
240 mb = &order->mb; in epu_dma_done()
247 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done()
253 mdl_ack = order->mdl_ack; in epu_dma_done()
277 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done()
324 static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument
327 char *str = order->str; in epu_debug()
329 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug()
335 static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument
337 switch (order->rpu) { in epu_cmd()
[all …]
/linux-6.8/drivers/gpu/drm/lib/
drm_random.c
16 void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument
24 swap(order[i], order[j]); in drm_random_reorder()
31 unsigned int *order, i; in drm_random_order() local
33 order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order()
34 if (!order) in drm_random_order()
35 return order; in drm_random_order()
38 order[i] = i; in drm_random_order()
40 drm_random_reorder(order, count, state); in drm_random_order()
41 return order; in drm_random_order()
/linux-6.8/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c
14 unsigned int order; member
81 unsigned long order = __fls(size / PAGE_SIZE); in ttm_pool_pre_populated() local
84 tt = ttm_tt_kunit_init(test, order, caching, size); in ttm_pool_pre_populated()
104 .order = 0,
108 .order = 2,
112 .order = MAX_PAGE_ORDER + 1,
116 .order = 0,
121 .order = MAX_PAGE_ORDER + 1,
144 unsigned int expected_num_pages = 1 << params->order; in ttm_pool_alloc_basic()
168 if (params->order <= MAX_PAGE_ORDER) { in ttm_pool_alloc_basic()
[all …]
/linux-6.8/arch/riscv/mm/
hugetlbpage.c
35 unsigned long order; in huge_pte_alloc() local
68 for_each_napot_order(order) { in huge_pte_alloc()
69 if (napot_cont_size(order) == sz) { in huge_pte_alloc()
70 pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc()
88 unsigned long order; in huge_pte_offset() local
119 for_each_napot_order(order) { in huge_pte_offset()
120 if (napot_cont_size(order) == sz) { in huge_pte_offset()
121 pte = pte_offset_huge(pmd, addr & napot_cont_mask(order)); in huge_pte_offset()
186 unsigned long order; in arch_make_huge_pte() local
188 for_each_napot_order(order) { in arch_make_huge_pte()
[all …]
/linux-6.8/include/linux/
gfp.h
64 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
172 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
175 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
178 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
180 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
192 /* Bulk allocate order-0 pages */
233 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
238 return __alloc_pages(gfp_mask, order, nid, NULL); in __alloc_pages_node()
242 struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node() argument
247 return __folio_alloc(gfp, order, nid, NULL); in __folio_alloc_node()
[all …]
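Usage of these entry points is symmetric in order: an order-n allocation returns 2^n physically contiguous pages and must be freed with the same order. A minimal sketch:

static void *grab_four_pages(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2); /* 1 << 2 = 4 pages */

        if (!page)
                return NULL;
        return page_address(page);
}

static void drop_four_pages(void *addr)
{
        free_pages((unsigned long)addr, 2); /* order must match the alloc */
}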
compaction.h
61 * Number of free order-0 pages that should be available above given watermark
65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
69 * free scanner may have up to 1 << order pages on its list and then in compact_gap()
70 * try to split an (order - 1) free page. At that point, a gap of in compact_gap()
71 * 1 << order might not be enough, so it's safer to require twice that in compact_gap()
80 return 2UL << order; in compact_gap()
85 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
86 extern int fragmentation_index(struct zone *zone, unsigned int order);
88 unsigned int order, unsigned int alloc_flags,
92 extern bool compaction_suitable(struct zone *zone, int order,
[all …]
/linux-6.8/arch/riscv/kvm/
tlb.c
22 unsigned long order) in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
26 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_vmid_gpa()
33 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
38 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
50 unsigned long order) in kvm_riscv_local_hfence_gvma_gpa() argument
54 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_gpa()
61 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
66 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
81 unsigned long order) in kvm_riscv_local_hfence_vvma_asid_gva() argument
85 if (PTRS_PER_PTE < (gvsz >> order)) { in kvm_riscv_local_hfence_vvma_asid_gva()
[all …]
/linux-6.8/Documentation/userspace-api/media/v4l/
field-order.rst
3 .. _field-order:
6 Field Order
25 which field of a frame is older, the *temporal order*.
30 even) fields, the *spatial order*: The first line of the top field is
39 creating a natural order.
44 and spatial order of fields. The diagrams below should make this
48 bus in the same order they were captured, so if the top field was
53 order. Some drivers may permit the selection of a different order, to
76 - Applications request this field order when any field format
78 e.g. the requested image size, and return the actual field order.
[all …]
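The negotiation the document describes is driven through VIDIOC_S_FMT: an application may pass V4L2_FIELD_ANY and read back the field order the driver actually selected. A hedged userspace sketch (device setup and error handling trimmed; the 720x576 YUYV format is an arbitrary example):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int negotiate_field_order(int fd)
{
        struct v4l2_format fmt;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width = 720;
        fmt.fmt.pix.height = 576;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
        fmt.fmt.pix.field = V4L2_FIELD_ANY; /* let the driver choose */

        if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                return -1;

        /* e.g. V4L2_FIELD_INTERLACED, V4L2_FIELD_TOP, ... */
        return fmt.fmt.pix.field;
}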
