Searched refs:alloc (Results 1 – 25 of 571) sorted by relevance

/linux/drivers/android/
binder_alloc.c
61 VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument
64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
65 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
70 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
73 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
81 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
85 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
92 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
100 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
104 binder_insert_allocated_buffer_locked(struct binder_alloc * alloc,struct binder_buffer * new_buffer) binder_insert_allocated_buffer_locked() argument
129 binder_alloc_prepare_to_free_locked(struct binder_alloc * alloc,unsigned long user_ptr) binder_alloc_prepare_to_free_locked() argument
169 binder_alloc_prepare_to_free(struct binder_alloc * alloc,unsigned long user_ptr) binder_alloc_prepare_to_free() argument
177 binder_set_installed_page(struct binder_alloc * alloc,unsigned long index,struct page * page) binder_set_installed_page() argument
186 binder_get_installed_page(struct binder_alloc * alloc,unsigned long index) binder_get_installed_page() argument
192 binder_lru_freelist_add(struct binder_alloc * alloc,unsigned long start,unsigned long end) binder_lru_freelist_add() argument
222 binder_alloc_set_mapped(struct binder_alloc * alloc,bool state) binder_alloc_set_mapped() argument
228 binder_alloc_is_mapped(struct binder_alloc * alloc) binder_alloc_is_mapped() argument
234 binder_page_lookup(struct binder_alloc * alloc,unsigned long addr) binder_page_lookup() argument
254 binder_page_insert(struct binder_alloc * alloc,unsigned long addr,struct page * page) binder_page_insert() argument
281 binder_page_alloc(struct binder_alloc * alloc,unsigned long index) binder_page_alloc() argument
312 binder_install_single_page(struct binder_alloc * alloc,unsigned long index,unsigned long addr) binder_install_single_page() argument
361 binder_install_buffer_pages(struct binder_alloc * alloc,struct binder_buffer * buffer,size_t size) binder_install_buffer_pages() argument
392 binder_lru_freelist_del(struct binder_alloc * alloc,unsigned long start,unsigned long end) binder_lru_freelist_del() argument
425 debug_no_space_locked(struct binder_alloc * alloc) debug_no_space_locked() argument
462 debug_low_async_space_locked(struct binder_alloc * alloc) debug_low_async_space_locked() argument
516 binder_alloc_new_buf_locked(struct binder_alloc * alloc,struct binder_buffer * new_buffer,size_t size,int is_async) binder_alloc_new_buf_locked() argument
647 binder_alloc_new_buf(struct binder_alloc * alloc,size_t data_size,size_t offsets_size,size_t extra_buffers_size,int is_async) binder_alloc_new_buf() argument
712 binder_delete_free_buffer(struct binder_alloc * alloc,struct binder_buffer * buffer) binder_delete_free_buffer() argument
739 binder_free_buf_locked(struct binder_alloc * alloc,struct binder_buffer * buffer) binder_free_buf_locked() argument
811 binder_alloc_get_page(struct binder_alloc * alloc,struct binder_buffer * buffer,binder_size_t buffer_offset,pgoff_t * pgoffp) binder_alloc_get_page() argument
833 binder_alloc_clear_buf(struct binder_alloc * alloc,struct binder_buffer * buffer) binder_alloc_clear_buf() argument
860 binder_alloc_free_buf(struct binder_alloc * alloc,struct binder_buffer * buffer) binder_alloc_free_buf() argument
894 binder_alloc_mmap_handler(struct binder_alloc * alloc,struct vm_area_struct * vma) binder_alloc_mmap_handler() argument
964 binder_alloc_deferred_release(struct binder_alloc * alloc) binder_alloc_deferred_release() argument
1042 binder_alloc_print_allocated(struct seq_file * m,struct binder_alloc * alloc) binder_alloc_print_allocated() argument
1065 binder_alloc_print_pages(struct seq_file * m,struct binder_alloc * alloc) binder_alloc_print_pages() argument
1100 binder_alloc_get_allocated_count(struct binder_alloc * alloc) binder_alloc_get_allocated_count() argument
1120 binder_alloc_vma_close(struct binder_alloc * alloc) binder_alloc_vma_close() argument
1141 struct binder_alloc *alloc = mdata->alloc; binder_alloc_free_page() local
1233 __binder_alloc_init(struct binder_alloc * alloc,struct list_lru * freelist) __binder_alloc_init() argument
1252 binder_alloc_init(struct binder_alloc * alloc) binder_alloc_init() argument
1304 check_buffer(struct binder_alloc * alloc,struct binder_buffer * buffer,binder_size_t offset,size_t bytes) check_buffer() argument
1330 binder_alloc_copy_user_to_buffer(struct binder_alloc * alloc,struct binder_buffer * buffer,binder_size_t buffer_offset,const void __user * from,size_t bytes) binder_alloc_copy_user_to_buffer() argument
1361 binder_alloc_do_buffer_copy(struct binder_alloc * alloc,bool to_buffer,struct binder_buffer * buffer,binder_size_t buffer_offset,void * ptr,size_t bytes) binder_alloc_do_buffer_copy() argument
1392 binder_alloc_copy_to_buffer(struct binder_alloc * alloc,struct binder_buffer * buffer,binder_size_t buffer_offset,void * src,size_t bytes) binder_alloc_copy_to_buffer() argument
1402 binder_alloc_copy_from_buffer(struct binder_alloc * alloc,void * dest,struct binder_buffer * buffer,binder_size_t buffer_offset,size_t bytes) binder_alloc_copy_from_buffer() argument
[all...]
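The binder_alloc.c matches above center on binder's best-fit free list: free buffers live in an rb-tree (alloc->free_buffers) keyed by the size that binder_alloc_buffer_size() computes from the gap to the next buffer. Below is a minimal sketch of that size-keyed insert using the kernel rbtree API; the struct and its size field are illustrative stand-ins, not binder's real types.

/*
 * Sketch of the size-keyed rb-tree insert performed by
 * binder_insert_free_buffer() above; types are stand-ins.
 */
#include <linux/rbtree.h>

struct demo_buffer {
        struct rb_node rb_node;
        size_t size;            /* what binder_alloc_buffer_size() computes */
};

static void demo_insert_free(struct rb_root *root, struct demo_buffer *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
                struct demo_buffer *buf;

                parent = *p;
                buf = rb_entry(parent, struct demo_buffer, rb_node);
                if (new->size < buf->size)      /* smaller blocks to the left */
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&new->rb_node, parent, p); /* attach as leaf */
        rb_insert_color(&new->rb_node, root);   /* rebalance */
}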
binder_alloc.h
22 * @entry: entry alloc->buffers
63 * @alloc: binder_alloc owning the page to reclaim
64 * @page_index: offset in @alloc->pages[] into the page to reclaim
68 struct binder_alloc *alloc; member
127 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
132 void binder_alloc_init(struct binder_alloc *alloc);
135 void binder_alloc_vma_close(struct binder_alloc *alloc);
137 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
139 void binder_alloc_free_buf(struct binder_alloc *alloc,
141 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
157 binder_alloc_get_free_async_space(struct binder_alloc * alloc) binder_alloc_get_free_async_space() argument
[all...]
binder_trace.h
298 TP_PROTO(struct binder_alloc *alloc, bool allocate,
300 TP_ARGS(alloc, allocate, start, end),
308 __entry->proc = alloc->pid;
310 __entry->offset = start - alloc->vm_start;
319 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
320 TP_ARGS(alloc, page_index),
326 __entry->proc = alloc->pid;
334 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
335 TP_ARGS(alloc, page_index));
338 TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
[all...]
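The binder_trace.h fragments are pieces of TRACE_EVENT() definitions: TP_PROTO()/TP_ARGS() declare the tracepoint signature, and the __entry assignments happen in TP_fast_assign(). A hedged skeleton of how those pieces fit together, omitting the TRACE_SYSTEM/include boilerplate a real trace header needs; the event name and field set are illustrative, not binder's actual events.

/*
 * Skeleton of a TRACE_EVENT() in the shape the fragments above
 * come from; event name and fields are illustrative.
 */
#include <linux/tracepoint.h>

TRACE_EVENT(demo_binder_page,
        TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
        TP_ARGS(alloc, page_index),
        TP_STRUCT__entry(
                __field(int, proc)
                __field(size_t, page_index)
        ),
        TP_fast_assign(
                __entry->proc = alloc->pid;             /* which process */
                __entry->page_index = page_index;       /* which page */
        ),
        TP_printk("proc=%d page_index=%zu",
                  __entry->proc, __entry->page_index)
);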
/linux/drivers/infiniband/hw/cxgb4/
id_table.c
44 u32 c4iw_id_alloc(struct c4iw_id_table *alloc) in c4iw_id_alloc() argument
49 spin_lock_irqsave(&alloc->lock, flags); in c4iw_id_alloc()
51 obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); in c4iw_id_alloc()
52 if (obj >= alloc->max) in c4iw_id_alloc()
53 obj = find_first_zero_bit(alloc->table, alloc->max); in c4iw_id_alloc()
55 if (obj < alloc->max) { in c4iw_id_alloc()
56 if (alloc in c4iw_id_alloc()
71 c4iw_id_free(struct c4iw_id_table * alloc,u32 obj) c4iw_id_free() argument
82 c4iw_id_table_alloc(struct c4iw_id_table * alloc,u32 start,u32 num,u32 reserved,u32 flags) c4iw_id_table_alloc() argument
103 c4iw_id_table_free(struct c4iw_id_table * alloc) c4iw_id_table_free() argument
[all...]
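c4iw_id_alloc() above is the classic round-robin bitmap allocator: scan forward from the last-allocated hint with find_next_zero_bit(), wrap around with find_first_zero_bit(), then mark the bit busy. mthca_alloc() further down uses the same pattern. A minimal sketch with locking and the random-skip variant omitted; the struct is an illustrative stand-in.

/*
 * Round-robin bitmap id allocator, as in c4iw_id_alloc() and
 * mthca_alloc(); locking omitted, names illustrative.
 */
#include <linux/bitops.h>
#include <linux/types.h>

struct demo_id_table {
        unsigned long *table;   /* one bit per id; set = in use */
        u32 max;                /* number of ids */
        u32 last;               /* hint: where the next scan starts */
};

static u32 demo_id_alloc(struct demo_id_table *t)
{
        u32 obj = find_next_zero_bit(t->table, t->max, t->last);

        if (obj >= t->max)      /* nothing free past the hint: wrap */
                obj = find_first_zero_bit(t->table, t->max);
        if (obj < t->max) {
                set_bit(obj, t->table);
                t->last = (obj + 1 < t->max) ? obj + 1 : 0;
        }
        return obj;             /* obj == t->max means the table is full */
}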
/linux/sound/isa/gus/
gus_mem.c
18 void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup) in snd_gf1_mem_lock() argument
21 mutex_lock(&alloc->memory_mutex); in snd_gf1_mem_lock()
23 mutex_unlock(&alloc->memory_mutex); in snd_gf1_mem_lock()
28 snd_gf1_mem_xalloc(struct snd_gf1_mem *alloc, struct snd_gf1_mem_block *block, in snd_gf1_mem_xalloc() argument
43 pblock = alloc->first; in snd_gf1_mem_xalloc()
49 if (pblock == alloc->first) in snd_gf1_mem_xalloc()
50 alloc->first = nblock; in snd_gf1_mem_xalloc()
53 mutex_unlock(&alloc->memory_mutex); in snd_gf1_mem_xalloc()
59 if (alloc->last == NULL) { in snd_gf1_mem_xalloc()
61 alloc in snd_gf1_mem_xalloc()
70 snd_gf1_mem_xfree(struct snd_gf1_mem * alloc,struct snd_gf1_mem_block * block) snd_gf1_mem_xfree() argument
100 snd_gf1_mem_look(struct snd_gf1_mem * alloc,unsigned int address) snd_gf1_mem_look() argument
113 snd_gf1_mem_share(struct snd_gf1_mem * alloc,unsigned int * share_id) snd_gf1_mem_share() argument
128 snd_gf1_mem_find(struct snd_gf1_mem * alloc,struct snd_gf1_mem_block * block,unsigned int size,int w_16,int align) snd_gf1_mem_find() argument
180 snd_gf1_mem_alloc(struct snd_gf1_mem * alloc,int owner,char * name,int size,int w_16,int align,unsigned int * share_id) snd_gf1_mem_alloc() argument
213 snd_gf1_mem_free(struct snd_gf1_mem * alloc,unsigned int address) snd_gf1_mem_free() argument
231 struct snd_gf1_mem *alloc; snd_gf1_mem_init() local
260 struct snd_gf1_mem *alloc; snd_gf1_mem_done() local
278 struct snd_gf1_mem *alloc; snd_gf1_mem_info_read() local
[all...]
/linux/drivers/android/tests/
binder_alloc_kunit.c
141 struct binder_alloc *alloc, in check_buffer_pages_allocated() argument
152 page_index = (page_addr - alloc->vm_start) / PAGE_SIZE; in check_buffer_pages_allocated()
153 if (!alloc->pages[page_index] || in check_buffer_pages_allocated()
154 !list_empty(page_to_lru(alloc->pages[page_index]))) { in check_buffer_pages_allocated()
155 kunit_err(test, "expect alloc but is %s at page index %d\n", in check_buffer_pages_allocated()
156 alloc->pages[page_index] ? in check_buffer_pages_allocated()
165 struct binder_alloc *alloc, in binder_alloc_test_alloc_buf() argument
173 buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); in binder_alloc_test_alloc_buf()
175 !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i])) in binder_alloc_test_alloc_buf()
183 struct binder_alloc *alloc, in binder_alloc_test_free_buf() argument
205 binder_alloc_test_free_page(struct kunit * test,struct binder_alloc * alloc) binder_alloc_test_free_page() argument
230 binder_alloc_test_alloc_free(struct kunit * test,struct binder_alloc * alloc,struct binder_alloc_test_case_info * tc,size_t end) binder_alloc_test_alloc_free() argument
299 permute_frees(struct kunit * test,struct binder_alloc * alloc,struct binder_alloc_test_case_info * tc,unsigned long * runs,unsigned long * failures,int index,size_t end) permute_frees() argument
335 gen_buf_sizes(struct kunit * test,struct binder_alloc * alloc,struct binder_alloc_test_case_info * tc,size_t * end_offset,unsigned long * runs,unsigned long * failures) gen_buf_sizes() argument
370 gen_buf_offsets(struct kunit * test,struct binder_alloc * alloc,size_t * end_offset,int * alignments,unsigned long * runs,unsigned long * failures,int index) gen_buf_offsets() argument
406 struct binder_alloc alloc; global() member
423 struct binder_alloc *alloc = &priv->alloc; binder_alloc_test_mmap() local
468 struct binder_alloc *alloc = vma->vm_private_data; binder_alloc_test_vma_close() local
481 struct binder_alloc *alloc = filp->private_data; binder_alloc_test_mmap_handler() local
[all...]
/linux/drivers/infiniband/hw/mthca/
mthca_allocator.c
40 u32 mthca_alloc(struct mthca_alloc *alloc) in mthca_alloc() argument
45 spin_lock_irqsave(&alloc->lock, flags); in mthca_alloc()
47 obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); in mthca_alloc()
48 if (obj >= alloc->max) { in mthca_alloc()
49 alloc->top = (alloc->top + alloc->max) & alloc->mask; in mthca_alloc()
64 mthca_free(struct mthca_alloc * alloc,u32 obj) mthca_free() argument
79 mthca_alloc_init(struct mthca_alloc * alloc,u32 num,u32 mask,u32 reserved) mthca_alloc_init() argument
100 mthca_alloc_cleanup(struct mthca_alloc * alloc) mthca_alloc_cleanup() argument
[all...]
mthca_uar.c
40 uar->index = mthca_alloc(&dev->uar_table.alloc); in mthca_uar_alloc()
51 mthca_free(&dev->uar_table.alloc, uar->index); in mthca_uar_free()
58 ret = mthca_alloc_init(&dev->uar_table.alloc, in mthca_init_uar_table()
67 mthca_alloc_cleanup(&dev->uar_table.alloc); in mthca_init_uar_table()
77 mthca_alloc_cleanup(&dev->uar_table.alloc); in mthca_cleanup_uar_table()
mthca_pd.c
46 pd->pd_num = mthca_alloc(&dev->pd_table.alloc); in mthca_pd_alloc()
56 mthca_free(&dev->pd_table.alloc, pd->pd_num); in mthca_pd_alloc()
66 mthca_free(&dev->pd_table.alloc, pd->pd_num); in mthca_pd_free()
71 return mthca_alloc_init(&dev->pd_table.alloc, in mthca_init_pd_table()
80 mthca_alloc_cleanup(&dev->pd_table.alloc); in mthca_cleanup_pd_table()
/linux/fs/ocfs2/
localalloc.c
20 #include "alloc.h"
35 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
38 struct ocfs2_dinode *alloc,
42 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
46 struct ocfs2_dinode *alloc,
64 * the local alloc.
66 * Generally, we'd like to pick as large a local alloc as
72 * Some things work against us when trying to choose a large local alloc:
87 * alloc maximums at various cluster sizes (4k blocksize)
142 * local alloc size in ocfs2_la_default_mb()
272 struct ocfs2_dinode *alloc = NULL; ocfs2_load_local_alloc() local
376 struct ocfs2_dinode *alloc = NULL; ocfs2_shutdown_local_alloc() local
484 struct ocfs2_dinode *alloc; ocfs2_begin_local_alloc_recovery() local
548 ocfs2_complete_local_alloc_recovery(struct ocfs2_super * osb,struct ocfs2_dinode * alloc) ocfs2_complete_local_alloc_recovery() argument
620 struct ocfs2_dinode *alloc; ocfs2_reserve_local_alloc_bits() local
724 struct ocfs2_dinode *alloc; ocfs2_claim_local_alloc_bits() local
780 struct ocfs2_dinode *alloc; ocfs2_free_local_alloc_bits() local
812 ocfs2_local_alloc_count_bits(struct ocfs2_dinode * alloc) ocfs2_local_alloc_count_bits() argument
824 ocfs2_local_alloc_find_clear_bits(struct ocfs2_super * osb,struct ocfs2_dinode * alloc,u32 * numbits,struct ocfs2_alloc_reservation * resv) ocfs2_local_alloc_find_clear_bits() argument
905 ocfs2_clear_local_alloc(struct ocfs2_dinode * alloc) ocfs2_clear_local_alloc() argument
944 ocfs2_sync_local_to_main(struct ocfs2_super * osb,handle_t * handle,struct ocfs2_dinode * alloc,struct inode * main_bm_inode,struct buffer_head * main_bm_bh) ocfs2_sync_local_to_main() argument
1148 struct ocfs2_dinode *alloc = NULL; ocfs2_local_alloc_new_window() local
1237 struct ocfs2_dinode *alloc; ocfs2_local_alloc_slide_window() local
[all...]
/linux/fs/xfs/libxfs/
xfs_alloc_btree.c
139 key->alloc.ar_startblock = rec->alloc.ar_startblock; in xfs_allocbt_init_key_from_rec()
140 key->alloc.ar_blockcount = rec->alloc.ar_blockcount; in xfs_allocbt_init_key_from_rec()
150 x = be32_to_cpu(rec->alloc.ar_startblock); in xfs_bnobt_init_high_key_from_rec()
151 x += be32_to_cpu(rec->alloc.ar_blockcount) - 1; in xfs_bnobt_init_high_key_from_rec()
152 key->alloc.ar_startblock = cpu_to_be32(x); in xfs_bnobt_init_high_key_from_rec()
153 key->alloc.ar_blockcount = 0; in xfs_bnobt_init_high_key_from_rec()
161 key->alloc.ar_blockcount = rec->alloc.ar_blockcount; in xfs_cntbt_init_high_key_from_rec()
[all...]
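The xfs_alloc_btree.c matches show how a "high key" is derived from an extent record: it is the last block the extent covers, startblock + blockcount - 1, with blockcount zeroed in the bnobt key. The same arithmetic on plain integers; the kernel does it on big-endian on-disk fields via be32_to_cpu()/cpu_to_be32().

/* The computation behind xfs_bnobt_init_high_key_from_rec() above. */
#include <stdint.h>

static uint32_t demo_high_key(uint32_t startblock, uint32_t blockcount)
{
        /* an extent [start, start + count) ends at start + count - 1 */
        return startblock + blockcount - 1;
}
/* e.g. startblock 100, blockcount 8 -> high key 107 */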
/linux/drivers/gpu/drm/ttm/
ttm_pool.c
219 static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc) in ttm_pool_apply_caching() argument
222 unsigned int num_pages = alloc->pages - alloc->caching_divide; in ttm_pool_apply_caching()
227 switch (alloc->tt_caching) { in ttm_pool_apply_caching()
231 return set_pages_array_wc(alloc->caching_divide, num_pages); in ttm_pool_apply_caching()
233 return set_pages_array_uc(alloc->caching_divide, num_pages); in ttm_pool_apply_caching()
236 alloc->caching_divide = alloc->pages; in ttm_pool_apply_caching()
486 struct ttm_pool_alloc_state *alloc, in ttm_pool_allocated_page_commit() argument
492 *alloc in ttm_pool_allocated_page_commit()
512 ttm_pool_restore_commit(struct ttm_pool_tt_restore * restore,struct file * backup,const struct ttm_operation_ctx * ctx,struct ttm_pool_alloc_state * alloc) ttm_pool_restore_commit() argument
587 ttm_pool_page_allocated_restore(struct ttm_pool * pool,unsigned int order,struct page * p,enum ttm_caching page_caching,dma_addr_t first_dma,struct ttm_pool_tt_restore * restore,const struct ttm_pool_alloc_state * alloc) ttm_pool_page_allocated_restore() argument
607 ttm_pool_page_allocated(struct ttm_pool * pool,unsigned int order,struct page * p,enum ttm_caching page_caching,struct ttm_pool_alloc_state * alloc,struct ttm_pool_tt_restore * restore) ttm_pool_page_allocated() argument
681 ttm_pool_alloc_state_init(const struct ttm_tt * tt,struct ttm_pool_alloc_state * alloc) ttm_pool_alloc_state_init() argument
695 ttm_pool_alloc_find_order(unsigned int highest,const struct ttm_pool_alloc_state * alloc) ttm_pool_alloc_find_order() argument
702 __ttm_pool_alloc(struct ttm_pool * pool,struct ttm_tt * tt,const struct ttm_operation_ctx * ctx,struct ttm_pool_alloc_state * alloc,struct ttm_pool_tt_restore * restore) __ttm_pool_alloc() argument
812 struct ttm_pool_alloc_state alloc; ttm_pool_alloc() local
839 struct ttm_pool_alloc_state alloc; ttm_pool_restore_and_alloc() local
[all...]
/linux/lib/zstd/compress/
zstd_cwksp.h
223 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
277 void* const alloc = (BYTE*)ws->allocStart - bytes; in ZSTD_cwksp_reserve_internal_buffer_space() local
280 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); in ZSTD_cwksp_reserve_internal_buffer_space()
282 assert(alloc >= bottom); in ZSTD_cwksp_reserve_internal_buffer_space()
283 if (alloc < bottom) { in ZSTD_cwksp_reserve_internal_buffer_space()
284 DEBUGLOG(4, "cwksp: alloc failed!"); in ZSTD_cwksp_reserve_internal_buffer_space()
290 if (alloc < ws->tableValidEnd) { in ZSTD_cwksp_reserve_internal_buffer_space()
291 ws->tableValidEnd = alloc; in ZSTD_cwksp_reserve_internal_buffer_space()
293 ws->allocStart = alloc; in ZSTD_cwksp_reserve_internal_buffer_space()
294 return alloc; in ZSTD_cwksp_reserve_internal_buffer_space()
314 void *const alloc = ws->objectEnd; ZSTD_cwksp_internal_advance_phase() local
347 void* alloc; ZSTD_cwksp_reserve_internal() local
414 void* alloc; ZSTD_cwksp_reserve_table() local
454 void* alloc = ws->objectEnd; ZSTD_cwksp_reserve_object() local
[all...]
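The zstd_cwksp.h matches show two ideas: allocation sizes are rounded up to the next multiple of 64 bytes, and internal buffers are carved downward from the top of a preallocated workspace (alloc = allocStart - bytes), failing if the result would cross the bottom. A hedged sketch under those assumptions; names are illustrative, not zstd's.

/*
 * Downward bump allocator over a fixed workspace, with 64-byte
 * size rounding, as in ZSTD_cwksp_reserve_internal_buffer_space().
 */
#include <stddef.h>

#define DEMO_ALIGN64(n) (((n) + 63) & ~(size_t)63)

struct demo_wksp {
        char *bottom;           /* lowest usable workspace address */
        char *alloc_start;      /* moves down on every reserve */
};

static void *demo_reserve(struct demo_wksp *ws, size_t bytes)
{
        char *alloc = ws->alloc_start - DEMO_ALIGN64(bytes);

        if (alloc < ws->bottom)
                return NULL;    /* workspace exhausted */
        ws->alloc_start = alloc;
        return alloc;
}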
/linux/tools/perf/util/
strbuf.c
22 sb->alloc = sb->len = 0; in strbuf_init()
31 if (sb->alloc) { in strbuf_release()
39 char *res = sb->alloc ? sb->buf : NULL; in strbuf_detach()
51 if (nr < sb->alloc) in strbuf_grow()
57 if (alloc_nr(sb->alloc) > nr) in strbuf_grow()
58 nr = alloc_nr(sb->alloc); in strbuf_grow()
61 * Note that sb->buf == strbuf_slopbuf if sb->alloc == 0, and it is in strbuf_grow()
64 buf = realloc(sb->alloc ? sb->buf : NULL, nr * sizeof(*buf)); in strbuf_grow()
69 sb->alloc = nr; in strbuf_grow()
106 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); in strbuf_addv()
[all...]
strbuf.h
37 * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
51 size_t alloc; member
65 return sb->alloc ? sb->alloc - sb->len - 1 : 0; in strbuf_avail()
71 if (!sb->alloc) { in strbuf_setlen()
76 assert(len < sb->alloc); in strbuf_setlen()
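strbuf keeps a len/alloc pair and grows by realloc() to alloc_nr(alloc), with alloc == 0 signalling that buf still points at the shared slopbuf, so the first grow must realloc(NULL, ...). A hedged sketch of that grow step from strbuf_grow() above; the exact alloc_nr() growth factor (roughly 1.5x with a floor) is assumed here, and error handling is reduced to a return code.

/* Geometric-growth realloc step, mirroring strbuf_grow(). */
#include <stdlib.h>

#define demo_alloc_nr(x) (((x) + 16) * 3 / 2)

struct demo_strbuf {
        char *buf;
        size_t len;             /* bytes in use */
        size_t alloc;           /* capacity; 0 means buf is a shared slopbuf */
};

static int demo_strbuf_grow(struct demo_strbuf *sb, size_t extra)
{
        size_t nr = sb->len + extra + 1;        /* +1 for the NUL */
        char *buf;

        if (nr <= sb->alloc)
                return 0;                       /* already big enough */
        if (demo_alloc_nr(sb->alloc) > nr)
                nr = demo_alloc_nr(sb->alloc);  /* grow geometrically */
        /* realloc(NULL, ...) when alloc == 0, mirroring the slopbuf trick */
        buf = realloc(sb->alloc ? sb->buf : NULL, nr);
        if (!buf)
                return -1;
        sb->buf = buf;
        sb->alloc = nr;
        return 0;
}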
help-unknown-cmd.c
37 if (nr > cmds->alloc) { in add_cmd_list()
38 /* Choose bigger one to alloc */ in add_cmd_list()
39 if (alloc_nr(cmds->alloc) < nr) in add_cmd_list()
40 cmds->alloc = nr; in add_cmd_list()
42 cmds->alloc = alloc_nr(cmds->alloc); in add_cmd_list()
43 tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names)); in add_cmd_list()
/linux/tools/lib/subcmd/
subcmd-util.h
34 * is 'alloc', using the standard growing factor alloc_nr() macro.
36 * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
38 #define ALLOC_GROW(x, nr, alloc) \ argument
40 if ((nr) > alloc) { \
41 if (alloc_nr(alloc) < (nr)) \
42 alloc = (nr); \
44 alloc = alloc_nr(alloc); \
45 x = xrealloc((x), alloc * sizeof(*(x))); \
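A typical ALLOC_GROW() call site, matching the open-coded version in add_cmd_list() above; names, nr and alloc are assumed to be the caller's array pointer, element count and capacity variable:

        ALLOC_GROW(names, nr + 1, alloc);       /* ensure room for one more */
        names[nr++] = new_name;                 /* then append */

Because the macro may evaluate its arguments more than once, the header's warning against side effects in 'x' or 'alloc' applies to exactly this kind of call.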
/linux/tools/testing/selftests/mm/
droppable.c
22 void *alloc; in main() local
28 alloc = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0); in main()
29 assert(alloc != MAP_FAILED); in main()
30 memset(alloc, 'A', alloc_size); in main()
32 assert(*(uint8_t *)(alloc + i)); in main()
43 if (!*(uint8_t *)(alloc + i)) { in main()
/linux/tools/testing/selftests/bpf/progs/
verifier_kfunc_prog_types.c
126 struct bpf_cpumask *alloc, *ref; in cpumask_kfunc_load_test() local
128 alloc = bpf_cpumask_create(); in cpumask_kfunc_load_test()
129 if (!alloc) in cpumask_kfunc_load_test()
132 ref = bpf_cpumask_acquire(alloc); in cpumask_kfunc_load_test()
133 bpf_cpumask_set_cpu(0, alloc); in cpumask_kfunc_load_test()
137 bpf_cpumask_release(alloc); in cpumask_kfunc_load_test()
/linux/arch/xtensa/variants/csp/include/variant/
tie-asm.h
72 * alloc Select what category(ies) of registers to allocate; if any
76 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
84 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
96 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
116 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
138 * alloc Select what category(ies) of registers to allocate; if any
142 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
150 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
162 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
182 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
[all...]
/linux/arch/xtensa/variants/dc233c/include/variant/
tie-asm.h
73 * alloc Select what category(ies) of registers to allocate; if any
77 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
85 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
97 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
115 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
137 * alloc Select what category(ies) of registers to allocate; if any
141 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
149 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
161 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
179 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
[all...]
/linux/fs/nfs/
nfs3acl.c
260 struct posix_acl *orig = acl, *dfacl = NULL, *alloc; in nfs3_set_acl() local
267 alloc = get_inode_acl(inode, ACL_TYPE_DEFAULT); in nfs3_set_acl()
268 if (IS_ERR(alloc)) in nfs3_set_acl()
270 dfacl = alloc; in nfs3_set_acl()
274 alloc = get_inode_acl(inode, ACL_TYPE_ACCESS); in nfs3_set_acl()
275 if (IS_ERR(alloc)) in nfs3_set_acl()
278 acl = alloc; in nfs3_set_acl()
284 alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); in nfs3_set_acl()
285 if (IS_ERR(alloc)) in nfs3_set_acl()
287 acl = alloc; in nfs3_set_acl()
[all...]
/linux/net/core/
page_pool.c
405 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache()
417 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache()
420 if (likely(pool->alloc.count > 0)) { in page_pool_refill_alloc_cache()
421 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
434 if (likely(pool->alloc.count)) { in __page_pool_get_cached()
436 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
561 if (unlikely(pool->alloc.count > 0)) in __page_pool_alloc_netmems_slow()
562 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_netmems_slow()
565 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); in __page_pool_alloc_netmems_slow()
568 (struct page **)pool->alloc.cache); in __page_pool_alloc_netmems_slow()
[all...]
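The page_pool.c matches operate on a small per-pool array cache: allocation pops from the tail (cache[--count]), refill and recycling push back (cache[count++]), and only the slow path touches the page allocator. A hedged, self-contained sketch of that structure; size and names are illustrative.

/* Array-backed fast-path cache, as in page_pool's alloc.cache. */
#define DEMO_CACHE_SIZE 128

struct demo_pool_cache {
        unsigned int count;
        void *cache[DEMO_CACHE_SIZE];
};

static void *demo_cache_get(struct demo_pool_cache *c)
{
        if (c->count)
                return c->cache[--c->count];    /* fast path: pop */
        return NULL;                            /* caller takes the slow path */
}

static int demo_cache_put(struct demo_pool_cache *c, void *page)
{
        if (c->count < DEMO_CACHE_SIZE) {
                c->cache[c->count++] = page;    /* recycle into the cache */
                return 0;
        }
        return -1;                              /* cache full */
}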
/linux/arch/xtensa/variants/de212/include/variant/
tie-asm.h
72 * alloc Select what category(ies) of registers to allocate; if any
76 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
86 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
104 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
126 * alloc Select what category(ies) of registers to allocate; if any
130 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
140 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
158 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
/linux/scripts/gdb/linux/
slab.py
70 def get_track(cache, object_pointer, alloc): argument
72 p += (alloc * track_type.sizeof)
153 def slabtrace(alloc, cache_name): argument
164 def process_slab(loc_track, slab_list, alloc, cache): argument
172 p = get_track(cache, object_pointer, alloc)
174 if alloc == track_alloc:
202 process_slab(loc_track, cache_node['partial'], alloc, target_cache)
203 process_slab(loc_track, cache_node['full'], alloc, target_cache)
241 --alloc
246 lx-slabtrace --cache_name kmalloc-1k --alloc
[all...]
