
Searched full:order (Results 1 – 25 of 3138) sorted by relevance


/linux-3.3/include/trace/events/
kmem.h:152 TP_PROTO(struct page *page, unsigned int order),
154 TP_ARGS(page, order),
158 __field( unsigned int, order )
163 __entry->order = order;
166 TP_printk("page=%p pfn=%lu order=%d",
169 __entry->order)
188 TP_printk("page=%p pfn=%lu order=0 cold=%d",
196 TP_PROTO(struct page *page, unsigned int order,
199 TP_ARGS(page, order, gfp_flags, migratetype),
203 __field( unsigned int, order )
[all …]
vmscan.h:58 TP_PROTO(int nid, int order),
60 TP_ARGS(nid, order),
64 __field( int, order )
69 __entry->order = order;
72 TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
77 TP_PROTO(int nid, int zid, int order),
79 TP_ARGS(nid, zid, order),
84 __field( int, order )
90 __entry->order = order;
93 TP_printk("nid=%d zid=%d order=%d",
[all …]
/linux-3.3/drivers/media/video/
soc_mediabus.c:26 .order = SOC_MBUS_ORDER_LE,
35 .order = SOC_MBUS_ORDER_LE,
44 .order = SOC_MBUS_ORDER_LE,
53 .order = SOC_MBUS_ORDER_LE,
62 .order = SOC_MBUS_ORDER_LE,
71 .order = SOC_MBUS_ORDER_LE,
80 .order = SOC_MBUS_ORDER_LE,
89 .order = SOC_MBUS_ORDER_LE,
98 .order = SOC_MBUS_ORDER_LE,
107 .order = SOC_MBUS_ORDER_LE,
[all …]
/linux-3.3/arch/arm/lib/
lib1funcs.S:106 .macro ARM_DIV2_ORDER divisor, order argument
110 clz \order, \divisor
111 rsb \order, \order, #31
117 movhs \order, #16
118 movlo \order, #0
122 addhs \order, \order, #8
126 addhs \order, \order, #4
129 addhi \order, \order, #3
130 addls \order, \order, \divisor, lsr #1
137 .macro ARM_MOD_BODY dividend, divisor, order, spare
[all …]
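
The clz/rsb pair in ARM_DIV2_ORDER above computes the shift order of a power-of-two divisor: order = 31 - clz(divisor). A minimal user-space C model of that sequence (the function name and the GCC __builtin_clz intrinsic are illustrative assumptions, not part of the kernel source):

#include <assert.h>

/* Model of ARM_DIV2_ORDER: for a power-of-two divisor, the shift
 * order is 31 minus the count of leading zero bits. */
static unsigned int div2_order(unsigned int divisor)
{
	assert(divisor && (divisor & (divisor - 1)) == 0); /* power of two */
	return 31 - __builtin_clz(divisor);
}

/* div2_order(8) == 3, so n / 8 can be done as n >> 3. */
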
/linux-3.3/drivers/media/video/cx18/
cx18-mailbox.c:244 static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument
253 mb = &order->mb; in epu_dma_done()
260 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done()
266 mdl_ack = order->mdl_ack; in epu_dma_done()
290 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done()
338 static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument
341 char *str = order->str; in epu_debug()
343 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug()
349 static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument
351 switch (order->rpu) { in epu_cmd()
[all …]
/linux-3.3/arch/s390/mm/
page-states.c:54 static inline void set_page_unstable(struct page *page, int order) in set_page_unstable() argument
58 for (i = 0; i < (1 << order); i++) in set_page_unstable()
65 void arch_free_page(struct page *page, int order) in arch_free_page() argument
69 set_page_unstable(page, order); in arch_free_page()
72 static inline void set_page_stable(struct page *page, int order) in set_page_stable() argument
76 for (i = 0; i < (1 << order); i++) in set_page_stable()
83 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument
87 set_page_stable(page, order); in arch_alloc_page()
92 unsigned long flags, order, t; in arch_set_page_states() local
103 for_each_migratetype_order(order, t) { in arch_set_page_states()
[all …]
/linux-3.3/Documentation/trace/postprocess/
trace-vmscan-postprocess.pl:31 # Per-order events
103 my $regex_direct_begin_default = 'order=([0-9]*) may_writepage=([0-9]*) gfp_flags=([A-Z_|]*)';
105 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
107 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
108 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scan…
159 # Verify fields are in the right order
180 "order", "may_writepage",
189 "nid", "order");
197 "nid", "zid", "order");
201 "isolate_mode", "order",
[all …]
/linux-3.3/mm/
page_alloc.c:114 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
152 static void __free_pages_ok(struct page *page, unsigned int order);
324 * Higher-order pages are called "compound pages". They are structured thusly:
334 * put_page() function. Its ->lru.prev holds the order of allocation.
335 * This usage means that zero-order pages may not be compound.
343 void prep_compound_page(struct page *page, unsigned long order) in prep_compound_page() argument
346 int nr_pages = 1 << order; in prep_compound_page()
349 set_compound_order(page, order); in prep_compound_page()
360 static int destroy_compound_page(struct page *page, unsigned long order) in destroy_compound_page() argument
363 int nr_pages = 1 << order; in destroy_compound_page()
[all …]
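
The page_alloc.c comment above pins down where the order of a compound page is kept: in the second constituent page, which is also why an order-0 page cannot be compound. A user-space sketch of that bookkeeping (struct model_page and the helper names are simplified stand-ins, not the kernel's struct page layout):

#include <stdio.h>

/* An order-N allocation covers 1 << N contiguous pages; the order is
 * stashed in page[1] so the free path can recover it (mirrors
 * set_compound_order()). Requires order >= 1, since page[1] must exist. */
struct model_page { unsigned long order; };

static void model_prep_compound(struct model_page *page, unsigned long order)
{
	page[1].order = order;
}

static unsigned long model_compound_pages(const struct model_page *page)
{
	return 1UL << page[1].order;
}

int main(void)
{
	struct model_page pages[8] = { { 0 } };

	model_prep_compound(pages, 3);
	printf("order-3 block spans %lu pages\n", model_compound_pages(pages));
	return 0;
}
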
compaction.c:38 unsigned int order; /* order a direct compactor needs */ member
89 /* Found a free page, break it into order-0 pages */ in isolate_freepages_block()
455 unsigned int order; in compact_finished() local
466 * order == -1 is expected when compacting via in compact_finished()
469 if (cc->order == -1) in compact_finished()
474 watermark += (1 << cc->order); in compact_finished()
476 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) in compact_finished()
480 for (order = cc->order; order < MAX_ORDER; order++) { in compact_finished()
482 if (!list_empty(&zone->free_area[order].free_list[cc->migratetype])) in compact_finished()
486 if (order >= pageblock_order && zone->free_area[order].nr_free) in compact_finished()
[all …]
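
compact_finished() above declares compaction done once a free block of at least the requested order is available. A user-space sketch of that scan (free_count[] is an invented stand-in for zone->free_area[order].nr_free; the per-migratetype list and pageblock_order special cases are omitted):

#include <stdbool.h>

#define MAX_ORDER 11	/* the 3.3 default */

/* Success test: any free list at or above the requested order is
 * non-empty, so the direct compactor's allocation can now succeed. */
static bool compaction_satisfied(const unsigned long free_count[MAX_ORDER],
				 unsigned int requested_order)
{
	unsigned int order;

	for (order = requested_order; order < MAX_ORDER; order++)
		if (free_count[order] > 0)
			return true;
	return false;
}
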
kmemcheck.c:7 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) in kmemcheck_alloc_shadow() argument
13 pages = 1 << order; in kmemcheck_alloc_shadow()
19 shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); in kmemcheck_alloc_shadow()
38 void kmemcheck_free_shadow(struct page *page, int order) in kmemcheck_free_shadow() argument
47 pages = 1 << order; in kmemcheck_free_shadow()
56 __free_pages(shadow, order); in kmemcheck_free_shadow()
99 void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, in kmemcheck_pagealloc_alloc() argument
107 pages = 1 << order; in kmemcheck_pagealloc_alloc()
116 kmemcheck_alloc_shadow(page, order, gfpflags, -1); in kmemcheck_pagealloc_alloc()
vmstat.c:547 unsigned int order; in fill_contig_page_info() local
553 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info()
557 blocks = zone->free_area[order].nr_free; in fill_contig_page_info()
561 info->free_pages += blocks << order; in fill_contig_page_info()
564 if (order >= suitable_order) in fill_contig_page_info()
566 (order - suitable_order); in fill_contig_page_info()
577 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index() argument
579 unsigned long requested = 1UL << order; in __fragmentation_index()
598 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index() argument
602 fill_contig_page_info(zone, order, &info); in fragmentation_index()
[all …]
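
fill_contig_page_info() and __fragmentation_index() above implement the fragmentation index: values near 0 say a failed high-order allocation is down to lack of memory, values approaching 1000 say it is down to fragmentation. A user-space rendering of the arithmetic, assuming the field names in the snippet and plain C division in place of the kernel's div_u64():

/* -1000: a suitable block already exists; 0: no free memory at all;
 * otherwise scale toward 1000 as free memory splinters into blocks
 * smaller than the 1 << order request. */
struct contig_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

static int fragmentation_index_model(unsigned int order,
				     const struct contig_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;
	if (info->free_blocks_suitable)
		return -1000;
	return 1000 - (int)((1000 + info->free_pages * 1000 / requested) /
			    info->free_blocks_total);
}
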
vmscan.c:58 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
61 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
64 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
65 * order-0 pages and then compact the zone
97 int order; member
101 * enough memory, i.e. the mode for high-order allocation. in set_reclaim_mode()
374 * reclaim/compaction. Depending on the order, we will either set the in set_reclaim_mode()
375 * sync mode or just reclaim order-0 pages later. in set_reclaim_mode()
387 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) in set_reclaim_mode()
389 else if (sc->order && priority < DEF_PRIORITY - 2) in set_reclaim_mode()
[all …]
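
The set_reclaim_mode() fragment above chooses between those modes from the requested order: orders above PAGE_ALLOC_COSTLY_ORDER get synchronous lumpy reclaim/compaction immediately, while smaller non-zero orders escalate only once scanning priority has dropped. Restated as a small C function (constants follow the 3.3 defaults; the enum is a stand-in for the RECLAIM_MODE_* flags):

#define PAGE_ALLOC_COSTLY_ORDER 3
#define DEF_PRIORITY 12

enum mode { MODE_SINGLE, MODE_SYNC, MODE_ASYNC };

/* Mirrors the order/priority branch quoted from set_reclaim_mode(). */
static enum mode pick_reclaim_mode(int order, int priority)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return MODE_SYNC;	/* costly order: go synchronous */
	else if (order && priority < DEF_PRIORITY - 2)
		return MODE_ASYNC;	/* pressure rising: async lumpy */
	return MODE_SINGLE;		/* plain order-0 reclaim */
}
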
/linux-3.3/arch/s390/include/asm/
sigp.h:65 static inline int raw_sigp(u16 cpu, int order) in raw_sigp() argument
76 "a" (order) : "cc" , "memory"); in raw_sigp()
83 static inline int raw_sigp_p(u32 parameter, u16 cpu, int order) in raw_sigp_p() argument
94 "a" (order) : "cc" , "memory"); in raw_sigp_p()
101 static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order) in raw_sigp_ps() argument
111 : "d" (cpu), "a" (order) in raw_sigp_ps()
117 static inline int sigp(int cpu, int order) in sigp() argument
119 return raw_sigp(cpu_logical_map(cpu), order); in sigp()
122 static inline int sigp_p(u32 parameter, int cpu, int order) in sigp_p() argument
124 return raw_sigp_p(parameter, cpu_logical_map(cpu), order); in sigp_p()
[all …]
/linux-3.3/arch/c6x/mm/
dma-coherent.c:45 static inline u32 __alloc_dma_pages(int order) in __alloc_dma_pages() argument
51 pos = bitmap_find_free_region(dma_bitmap, dma_pages, order); in __alloc_dma_pages()
57 static void __free_dma_pages(u32 addr, int order) in __free_dma_pages() argument
62 if (addr < dma_base || (pos + (1 << order)) >= dma_pages) { in __free_dma_pages()
68 bitmap_release_region(dma_bitmap, pos, order); in __free_dma_pages()
80 int order; in dma_alloc_coherent() local
85 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_alloc_coherent()
87 paddr = __alloc_dma_pages(order); in dma_alloc_coherent()
105 int order; in dma_free_coherent() local
110 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_free_coherent()
[all …]
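
Both paths above derive the order as get_count_order(((size - 1) >> PAGE_SHIFT) + 1): round the byte count up to whole pages, then take the ceiling log2. A user-space model (MODEL_PAGE_SHIFT of 12 and the helper names are assumptions for illustration):

#define MODEL_PAGE_SHIFT 12	/* 4 KiB pages assumed */

static int count_order(unsigned int n)		/* ceil(log2(n)) */
{
	return n <= 1 ? 0 : 32 - __builtin_clz(n - 1);
}

static int dma_alloc_order(unsigned long size)
{
	unsigned int pages = ((size - 1) >> MODEL_PAGE_SHIFT) + 1;

	return count_order(pages);
}

/* dma_alloc_order(4096) == 0, (4097) == 1, (16384) == 2. */
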
/linux-3.3/include/linux/
gfp.h:188 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
288 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
291 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
295 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
299 __alloc_pages(gfp_t gfp_mask, unsigned int order, in __alloc_pages() argument
302 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages()
306 unsigned int order) in alloc_pages_node() argument
312 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_node()
316 unsigned int order) in alloc_pages_exact_node() argument
320 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_exact_node()
[all …]
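
All of the gfp.h helpers above bottom out in __alloc_pages_nodemask() with a gfp mask and an order. A minimal kernel-side sketch of the usual call pattern (not taken from the source; error handling trimmed to the essentials):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate a 2^order run of contiguous pages and release it with the
 * same order. GFP_KERNEL may sleep, so not for atomic context. */
static int demo_alloc(void)
{
	unsigned int order = 2;			/* four contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return -ENOMEM;

	/* ... use the memory, e.g. via page_address(page) ... */

	__free_pages(page, order);
	return 0;
}
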
/linux-3.3/drivers/gpu/drm/
drm_bufs.c:598 * reallocates the buffer list of the same size order to accommodate the new
610 int order; in drm_addbufs_agp() local
623 order = drm_order(request->size); in drm_addbufs_agp()
624 size = 1 << order; in drm_addbufs_agp()
628 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; in drm_addbufs_agp()
635 DRM_DEBUG("order: %d\n", order); in drm_addbufs_agp()
642 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) in drm_addbufs_agp()
669 entry = &dma->bufs[order]; in drm_addbufs_agp()
673 return -ENOMEM; /* May only call once for each order */ in drm_addbufs_agp()
698 buf->order = order; in drm_addbufs_agp()
[all …]
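
drm_addbufs_agp() above rounds the request with drm_order(), the smallest order such that 1 << order covers the size, and then rebases it to whole pages as order - PAGE_SHIFT. A user-space model of the rounding (function name is local to this sketch):

/* Smallest order with (1 << order) >= size; equivalent to ceil(log2). */
static int drm_order_model(unsigned long size)
{
	int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

/* drm_order_model(4096) == 12, so with PAGE_SHIFT == 12 the page_order
 * in the snippet becomes 0: a single-page buffer. */
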
/linux-3.3/drivers/net/ethernet/mellanox/mlx4/
mr.c:60 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) in mlx4_buddy_alloc() argument
68 for (o = order; o <= buddy->max_order; ++o) in mlx4_buddy_alloc()
83 while (o > order) { in mlx4_buddy_alloc()
92 seg <<= order; in mlx4_buddy_alloc()
97 static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) in mlx4_buddy_free() argument
99 seg >>= order; in mlx4_buddy_free()
103 while (test_bit(seg ^ 1, buddy->bits[order])) { in mlx4_buddy_free()
104 clear_bit(seg ^ 1, buddy->bits[order]); in mlx4_buddy_free()
105 --buddy->num_free[order]; in mlx4_buddy_free()
107 ++order; in mlx4_buddy_free()
[all …]
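
mlx4_buddy_free() above coalesces upward: while the buddy segment (seg ^ 1) is free at the current order, both halves merge into one block one order higher. A user-space model of that loop (the bits[][] array is an illustrative stand-in for the driver's per-order bitmaps; seg is already in units of 2^order blocks):

#include <stdbool.h>

#define MODEL_MAX_ORDER 8

static bool bits[MODEL_MAX_ORDER][1 << MODEL_MAX_ORDER]; /* free markers */

/* Free one block: absorb a free buddy and climb one order at a time,
 * then mark the (possibly merged) block free at the final order. */
static void buddy_free_model(unsigned int seg, unsigned int order)
{
	while (order < MODEL_MAX_ORDER - 1 && bits[order][seg ^ 1]) {
		bits[order][seg ^ 1] = false;	/* buddy leaves its list */
		seg >>= 1;			/* parent block, one order up */
		order++;
	}
	bits[order][seg] = true;
}
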
/linux-3.3/arch/m68k/kernel/
dma_mm.c:25 int i, order; in dma_alloc_coherent() local
30 order = get_order(size); in dma_alloc_coherent()
32 page = alloc_pages(flag, order); in dma_alloc_coherent()
37 map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA); in dma_alloc_coherent()
39 __free_pages(page, order); in dma_alloc_coherent()
42 split_page(page, order); in dma_alloc_coherent()
44 order = 1 << order; in dma_alloc_coherent()
49 for (; i < order; i++) in dma_alloc_coherent()
/linux-3.3/Documentation/vm/
slub.txt:7 SLUB can enable debugging only for selected slabs in order to avoid
11 In order to switch debugging on one can add an option "slub_debug"
50 F.e. in order to boot just with sanity checks and red zoning one would specify:
65 Debugging options may require the minimum possible slab order to increase as
92 in order to reduce overhead and increase cache hotness of objects.
99 order to do so you must have the slabinfo tool. Then you can do
115 governed by the order of the allocation for each slab. The allocations
123 into one slab in order for the allocation order to be acceptable.
128 slub_min_order specifies a minimum order of slabs. A similar effect like
131 slub_max_order specifies the order at which slub_min_objects should no
[all …]
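
The slub.txt fragments above tie slab order to object count: a slab is a 2^order page allocation, so ignoring per-object metadata it yields (PAGE_SIZE << order) / object_size objects, which is what slub_min_objects pushes against. In numbers (4 KiB pages assumed; helper name invented):

#define MODEL_PAGE_SIZE 4096UL	/* assumption for the example */

static unsigned long objects_per_slab(unsigned int order,
				      unsigned long object_size)
{
	return (MODEL_PAGE_SIZE << order) / object_size;
}

/* 256-byte objects: order 0 -> 16 objects, order 1 -> 32, order 2 -> 64;
 * raising slub_min_objects above 16 therefore forces order >= 1 here. */
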
/linux-3.3/arch/powerpc/platforms/cell/
ras.c:103 int order; member
111 static int __init cbe_ptcal_enable_on_node(int nid, int order) in cbe_ptcal_enable_on_node() argument
125 area->order = order; in cbe_ptcal_enable_on_node()
127 area->order); in cbe_ptcal_enable_on_node()
137 * page, in order to avoid prefetches in memcpy and similar in cbe_ptcal_enable_on_node()
158 __free_pages(area->pages, area->order); in cbe_ptcal_enable_on_node()
169 int order, found_mic = 0; in cbe_ptcal_enable() local
182 order = get_order(*size); in cbe_ptcal_enable()
187 cbe_ptcal_enable_on_node(of_node_to_nid(np), order); in cbe_ptcal_enable()
202 cbe_ptcal_enable_on_node(*nid, order); in cbe_ptcal_enable()
[all …]
/linux-3.3/drivers/infiniband/hw/ehca/
ipz_pt_fn.c:129 int order = ilog2(queue->pagesize) - 9; in alloc_small_queue_page() local
135 if (!list_empty(&pd->free[order])) in alloc_small_queue_page()
136 page = list_entry(pd->free[order].next, in alloc_small_queue_page()
149 list_add(&page->list, &pd->free[order]); in alloc_small_queue_page()
152 bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order); in alloc_small_queue_page()
156 if (page->fill == IPZ_SPAGE_PER_KPAGE >> order) in alloc_small_queue_page()
157 list_move(&page->list, &pd->full[order]); in alloc_small_queue_page()
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9))); in alloc_small_queue_page()
163 queue->offset = bit << (order + 9); in alloc_small_queue_page()
174 int order = ilog2(queue->pagesize) - 9; in free_small_queue_page() local
[all …]
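
alloc_small_queue_page() above sub-allocates queue pages smaller than a kernel page: order = ilog2(pagesize) - 9 measures the queue page in 512-byte units, and a free bit index maps to byte offset bit << (order + 9), exactly as in the snippet. The offset math in plain C (helper name invented; pagesize must be a power of two >= 512):

/* Byte offset of slot 'bit' inside the kernel page backing the small
 * queue pages; __builtin_ctzl stands in for the kernel's ilog2(). */
static unsigned long small_queue_offset(unsigned long queue_pagesize, int bit)
{
	int order = __builtin_ctzl(queue_pagesize) - 9;

	return (unsigned long)bit << (order + 9);
}

/* For 512-byte queue pages (order 0), slot 3 sits at offset 1536. */
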
/linux-3.3/Documentation/sound/alsa/soc/
pops_clicks.txt:21 components in a specific order. This order is different for startup and
24 Startup Order :- DAC --> Mixers --> Output PGA --> Digital Unmute
26 Shutdown Order :- Digital Mute --> Output PGA --> Mixers --> DAC
40 Startup Order - Input PGA --> Mixers --> ADC
42 Shutdown Order - ADC --> Mixers --> Input PGA
/linux-3.3/Documentation/trace/
events-kmem.txt:41 mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
42 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
43 mm_page_free page=%p pfn=%lu order=%d
44 mm_page_free_batched page=%p pfn=%lu order=%d cold=%d
68 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
69 mm_page_pcpu_drain page=%p pfn=%lu order=%d cpu=%d migratetype=%d
72 for order-0 pages, reduces contention on the zone->lock and reduces the
97 External fragmentation affects whether a high-order allocation will be
104 high-order allocations will start failing at some time in the future. One
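
The format strings above are stable enough to post-process, which is what the vmscan Perl script earlier in these results does. A hedged C example that pulls the fields out of one mm_page_alloc line with sscanf (the sample line is invented, and real trace output carries a task/CPU/timestamp prefix that would have to be skipped first):

#include <stdio.h>

int main(void)
{
	const char *line = "page=0xffffea0000000000 pfn=1024 order=3 "
			   "migratetype=0 gfp_flags=GFP_KERNEL";
	void *page;
	unsigned long pfn;
	int order, migratetype;
	char gfp[64];

	/* Field order matches the mm_page_alloc format string above. */
	if (sscanf(line, "page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%63s",
		   &page, &pfn, &order, &migratetype, gfp) == 5)
		printf("order-%d allocation at pfn %lu (%s)\n", order, pfn, gfp);
	return 0;
}
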
/linux-3.3/arch/tile/mm/
homecache.c:182 void flush_remote_page(struct page *page, int order) in flush_remote_page() argument
184 int i, pages = (1 << order); in flush_remote_page()
249 void homecache_flush_cache(struct page *page, int order) in homecache_flush_cache() argument
251 int pages = 1 << order; in homecache_flush_cache()
398 void homecache_change_page_home(struct page *page, int order, int home) in homecache_change_page_home() argument
400 int i, pages = (1 << order); in homecache_change_page_home()
420 unsigned int order, int home) in homecache_alloc_pages() argument
424 page = alloc_pages(gfp_mask, order); in homecache_alloc_pages()
426 homecache_change_page_home(page, order, home); in homecache_alloc_pages()
432 unsigned int order, int home) in homecache_alloc_pages_node() argument
[all …]
/linux-3.3/arch/cris/arch-v32/drivers/pci/
dma.c:23 int order = get_order(size); in dma_alloc_coherent() local
33 ret = (void *)__get_free_pages(gfp, order); in dma_alloc_coherent()
45 int order = get_order(size); in dma_free_coherent() local
47 if (!dma_release_from_coherent(dev, order, vaddr)) in dma_free_coherent()
48 free_pages((unsigned long)vaddr, order); in dma_free_coherent()
