/linux/net/core/
page_pool.c
    46: #define alloc_stat_inc(pool, __stat)  (pool->alloc_stats.__stat++)
    48: #define recycle_stat_inc(pool, __stat) \
    50:         struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    54: #define recycle_stat_add(pool, __stat, val) \
    56:         struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    85: bool page_pool_get_stats(const struct page_pool *pool, ...)
    94:         stats->alloc_stats.fast += pool->alloc_stats.fast;
    95:         stats->alloc_stats.slow += pool->alloc_stats.slow;
    96:         stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
    97:         stats->alloc_stats.empty += pool->alloc_stats.empty;
    ...
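Drivers snapshot these counters through page_pool_get_stats(). A minimal sketch of that use; the hook name and data-slot layout are hypothetical, while the struct fields come from the listing above:

    #include <net/page_pool/helpers.h>

    /* Hypothetical ethtool hook: fold one RX queue's page-pool counters
     * into a stats snapshot (slot layout is made up). */
    static void drv_get_pool_stats(struct page_pool *pool, u64 *data)
    {
        struct page_pool_stats stats = {};

        if (!page_pool_get_stats(pool, &stats))
            return;  /* no stats to report */

        data[0] = stats.alloc_stats.fast;              /* hits in the pool cache */
        data[1] = stats.alloc_stats.slow;              /* fell back to the page allocator */
        data[2] = stats.recycle_stats.released_refcnt; /* pages that could not be recycled */
    }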
page_pool_user.c
    18: /* Protects: page_pools, netdevice->page_pools, pool->p.napi,
     *  pool->slow.netdev, pool->user. ... page pools are only ever
     *  linked to a netdev at creation time. Following page pool "visibility"
     *  ... to error, or (c) the entire namespace which owned this pool
     *  disappeared ... */
    36: typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
                                     const struct genl_info *info);
    42: struct page_pool *pool;              /* in netdev_nl_page_pool_get_do() */
    47: pool = xa_load(&page_pools, id);
    48: if (!pool || hlist_unhashed(&pool->...
    Also defined here: netdev_nl_page_pool_get_dump(), page_pool_nl_stats_fill(),
    page_pool_nl_fill(), netdev_nl_page_pool_event(), page_pool_list(),
    page_pool_detached(), page_pool_unlist(), page_pool_check_memory_provider(),
    page_pool_unreg_netdev_wipe(), page_pool_unreg_netdev().
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
pool.c
     9: switch (resource->pool->type) {      /* in hws_pool_free_one_resource() */
    11:         mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
    14:         mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
    23: static void hws_pool_resource_free(struct mlx5hws_pool *pool)
    25:         hws_pool_free_one_resource(pool->resource);
    26:         pool->resource = NULL;
    28:         if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
    29:                 hws_pool_free_one_resource(pool->mirror_resource);
    30:                 pool->mirror_resource = NULL;
    Also defined here: hws_pool_create_one_resource(), hws_pool_resource_alloc(),
    hws_pool_buddy_init(), hws_pool_buddy_db_get_chunk(), hws_pool_buddy_db_put_chunk(),
    hws_pool_buddy_db_uninit(), hws_pool_buddy_db_init(), hws_pool_bitmap_init(),
    hws_pool_bitmap_db_get_chunk(), hws_pool_bitmap_db_put_chunk(),
    hws_pool_bitmap_db_uninit(), hws_pool_bitmap_db_init(), hws_pool_db_init(),
    hws_pool_db_unint(), mlx5hws_pool_chunk_alloc(), mlx5hws_pool_chunk_free(),
    mlx5hws_pool_create(), mlx5hws_pool_destroy().
/linux/net/xdp/ |
xsk_buff_pool.c
    13: void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    20:         spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    21:         list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
    22:         spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    25: void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    32:         spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    34:         spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    37: void xp_destroy(struct xsk_buff_pool *pool)
    39:         if (!pool)
    42:         kvfree(pool->...
    Also defined here: xp_alloc_tx_descs(), xp_create_and_assign_umem(),
    xp_set_rxq_info(), xp_fill_cb(), xp_disable_drv_zc(), xp_assign_dev(),
    xp_assign_dev_shared(), xp_clear_dev(), xp_release_deferred(), xp_get_pool(),
    xp_put_pool(), xp_find_dma_map(), xp_dma_unmap(), xp_init_dma_info(),
    xp_dma_map(), xp_addr_crosses_non_contig_pg(), xp_check_unaligned(),
    xp_check_aligned(), xp_get_xskb(), __xp_alloc(), xp_alloc(),
    xp_alloc_new_from_fq(), xp_alloc_reused(), xp_alloc_slow(), xp_alloc_batch(),
    xp_can_alloc(), __xp_raw_get_addr(), __xp_raw_get_data(), xp_raw_get_data(),
    __xp_raw_get_dma(), xp_raw_get_dma(), xp_raw_get_ctx().
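A zero-copy driver refills its RX ring from the pool with xp_alloc_batch() rather than one xp_alloc() per descriptor. A sketch; the signatures are taken from the listing, the ring-refill context is an assumption:

    #include <net/xsk_buff_pool.h>

    /* Hypothetical RX refill loop: pull up to 'budget' buffers from the
     * fill queue in one call. */
    static u32 drv_refill_rx(struct xsk_buff_pool *pool,
                             struct xdp_buff **bufs, u32 budget)
    {
        u32 n;

        if (!xp_can_alloc(pool, budget))
            return 0;

        n = xp_alloc_batch(pool, bufs, budget); /* may return < budget */
        /* ...post bufs[0..n-1] to the hardware RX ring here... */
        return n;
    }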
/linux/drivers/md/ |
dm-thin.c
    42: * The block size of the device holding pool data must be ...
   194: * A pool device ties together a metadata device and a data device. It ...
   201: * The pool runs in various modes. Ordered in degraded order for comparisons.
   232: struct pool {
   234:         struct dm_target *ti;   /* Only set if a pool target is bound */
        (matched members: pool_md, pmd, pf, low_water_triggered, suspended,
        out_of_data_space, prison, copier, worker, wq, throttle, waker,
        no_space_timeout, last_commit_jiffies, ref_count, lock,
        deferred_flush_bios, mapping_pool)
   290: static void metadata_operation_failed(struct pool *pool, const char *op, int r);
   292: static enum pool_mode get_pool_mode(struct pool *pool)
   294:         return pool->...
    Also defined here: cell/bio-prison helpers (bio_detain(), cell_release(),
    cell_error(), cell_requeue(), ...); block mapping (get_bio_block(),
    get_bio_block_range(), remap(), issue(), block_size_is_power_of_two(),
    block_to_sectors()); prepared-mapping and discard processing
    (process_prepared_mapping(), break_up_discard_bio(),
    process_prepared_discard_passdown_pt1()/_pt2()); allocation and space checks
    (alloc_data_block(), check_for_metadata_space(), check_for_data_space(),
    commit(), check_low_water_mark()); the worker and deferred-bio machinery
    (do_worker(), do_waker(), do_no_space_timeout(), process_deferred_bios(),
    process_thin_deferred_bios(), sort_cells()); mode handling (set_pool_mode(),
    abort_transaction()); pool lifecycle and table hooks (pool_create(),
    __pool_destroy(), pool_ctr(), pool_map(), pool_preresume(), pool_resume(),
    pool_postsuspend(), pool_status(), pool_io_hints(), pool_message() with the
    process_*_mesg() handlers); and thin-device hooks (thin_endio(),
    thin_iterate_devices(), thin_io_hints()).
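The block-mapping helpers listed above (get_bio_block(), block_size_is_power_of_two(), block_to_sectors()) all hinge on dividing sector counts by the pool's block size, with a shift fast path when that size is a power of two. A simplified sketch of the mapping; the field names sectors_per_block and sectors_per_block_shift are assumptions about struct pool, and dm_block_t comes from the persistent-data headers:

    #include <linux/device-mapper.h>

    /* Map a bio's starting sector to a thin-pool block number, mirroring
     * the shape of get_bio_block(): shift when the pool block size is a
     * power of two, divide otherwise. */
    static dm_block_t sector_to_block(struct pool *pool, sector_t sector)
    {
        if (block_size_is_power_of_two(pool))
            return sector >> pool->sectors_per_block_shift;

        (void) sector_div(sector, pool->sectors_per_block); /* in-place divide */
        return sector;
    }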
/linux/net/ceph/ |
msgpool.c
    14: struct ceph_msgpool *pool = arg;     /* in msgpool_alloc() */
    17: msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, ...);
    20:         dout("msgpool_alloc %s failed\n", pool->name);
    22:         dout("msgpool_alloc %s %p\n", pool->name, msg);
    23:         msg->pool = pool;
    30: struct ceph_msgpool *pool = arg;     /* in msgpool_free() */
    33:         dout("msgpool_release %s %p\n", pool->...
    Also defined here: ceph_msgpool_init(), ceph_msgpool_destroy(),
    ceph_msgpool_get(), ceph_msgpool_put().
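The API pairs init/destroy with get/put. A sketch of the cycle; the init and get signatures come from the listing, while the message type and sizes are made-up values:

    #include <linux/ceph/msgpool.h>

    /* Hypothetical setup: reserve 8 reply messages so replies can still
     * be received under memory pressure. */
    static int example_msgpool(struct ceph_msgpool *pool)
    {
        struct ceph_msg *msg;
        int ret;

        ret = ceph_msgpool_init(pool, CEPH_MSG_OSD_OPREPLY,
                                512 /* front_len */, 1 /* max_data_items */,
                                8 /* size */, "osd_op_reply");
        if (ret)
            return ret;

        msg = ceph_msgpool_get(pool, 512, 1); /* falls back to the reserve */
        if (msg) {
            /* ...use msg... */
            ceph_msgpool_put(pool, msg);      /* returns it to the pool */
        }

        ceph_msgpool_destroy(pool);
        return 0;
    }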
/linux/sound/core/seq/ |
seq_memory.c
    22: static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
    24:         return pool->total_elements - atomic_read(&pool->counter);
    27: static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
    29:         return snd_seq_pool_available(pool) >= pool->room;
   225: static inline void free_cell(struct snd_seq_pool *pool, ...)
   228:         cell->next = pool->free;
   229:         pool->free = cell;
   230:         atomic_dec(&pool->...
    Also defined here: snd_seq_cell_free(), snd_seq_cell_alloc(), snd_seq_event_dup(),
    snd_seq_pool_poll_wait(), snd_seq_pool_init(), snd_seq_pool_mark_closing(),
    snd_seq_pool_done(), snd_seq_pool_new(), snd_seq_pool_delete(),
    snd_seq_info_pool().
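The pool lifecycle pairs new/init with mark_closing/done/delete. A sketch of that sequence, with signatures assumed from the private sound/core/seq/seq_memory.h header and an arbitrary pool size:

    #include <linux/errno.h>
    #include "seq_memory.h"  /* private header in sound/core/seq/ */

    static int example_seq_pool(void)
    {
        struct snd_seq_pool *pool;

        pool = snd_seq_pool_new(500);        /* room for 500 event cells */
        if (!pool)
            return -ENOMEM;

        if (snd_seq_pool_init(pool) >= 0) {  /* allocates the cell array */
            /* ...snd_seq_event_dup() draws cells, free_cell() returns them... */
            snd_seq_pool_mark_closing(pool); /* refuse new allocations */
            snd_seq_pool_done(pool);         /* wait for outstanding cells */
        }

        snd_seq_pool_delete(&pool);          /* frees and NULLs the pointer */
        return 0;
    }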
/linux/lib/ |
objpool.c
    19: objpool_init_percpu_slot(struct objpool_head *pool, ...)
    24:         void *obj = (void *)&slot->entries[pool->capacity];
    28:         slot->mask = pool->capacity - 1;
    37:         obj = obj + pool->obj_size;
    40:         pool->nr_objs++;
    48: objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, ...)
    63:         nodes = nr_objs / pool->nr_possible_cpus;
    64:         if (cpu_count < (nr_objs % pool->nr_possible_cpus))
    68:         size = struct_size(slot, entries, pool->capacity) +
    69:                pool->...
    Also defined here: objpool_fini_percpu_slots(), objpool_init(), objpool_free(),
    objpool_drop(), objpool_fini().
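objpool is a lock-free per-CPU object pool; the public pop/push calls live in include/linux/objpool.h. A minimal sketch, with a made-up element type and object count:

    #include <linux/objpool.h>
    #include <linux/slab.h>

    struct my_item {            /* hypothetical pooled object */
        int id;
    };

    static int example_objpool(void)
    {
        struct objpool_head pool;
        struct my_item *item;
        int ret;

        /* 128 preallocated objects, no per-object init or release callback */
        ret = objpool_init(&pool, 128, sizeof(struct my_item),
                           GFP_KERNEL, NULL, NULL, NULL);
        if (ret)
            return ret;

        item = objpool_pop(&pool);   /* NULL when the pool is empty */
        if (item)
            objpool_push(item, &pool);

        objpool_fini(&pool);         /* drops the pool reference */
        return 0;
    }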
genalloc.c
    16: * available. If new memory is added to the pool a lock has to be ...
   146: * gen_pool_create - create a new special memory pool
   148: * @nid: node id of the node the pool structure should be allocated on, or -1
   150: * Create a new special memory pool that can be used to manage special purpose ...
   155: struct gen_pool *pool;       /* in gen_pool_create() */
   157: pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
   158: if (pool != NULL) {
   159:         spin_lock_init(&pool->lock);
   160:         INIT_LIST_HEAD(&pool->chunks);
   161:         pool->...
    Also defined here: gen_pool_add_owner(), gen_pool_virt_to_phys(),
    gen_pool_destroy(), gen_pool_alloc_algo_owner(), gen_pool_dma_alloc(),
    gen_pool_dma_alloc_algo(), gen_pool_dma_alloc_align(), gen_pool_dma_zalloc(),
    gen_pool_dma_zalloc_algo(), gen_pool_dma_zalloc_align(), gen_pool_free_owner(),
    gen_pool_for_each_chunk(), gen_pool_has_addr(), gen_pool_avail(),
    gen_pool_size(), gen_pool_set_algo(), gen_pool_first_fit(),
    gen_pool_first_fit_align(), gen_pool_fixed_alloc(),
    gen_pool_first_fit_order_align(), gen_pool_best_fit(), devm_gen_pool_create(),
    of_gen_pool_get().
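The typical genalloc flow is create -> add -> alloc/free -> destroy. A minimal sketch managing a hypothetical on-chip SRAM window (the addresses and sizes are made up):

    #include <linux/genalloc.h>

    static int example_genalloc(unsigned long sram_virt, size_t sram_size)
    {
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(5, -1);  /* 32-byte allocation granules, any node */
        if (!pool)
            return -ENOMEM;

        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
            gen_pool_destroy(pool);
            return -ENOMEM;
        }

        addr = gen_pool_alloc(pool, 256);   /* returns 0 on failure */
        if (addr)
            gen_pool_free(pool, addr, 256);

        gen_pool_destroy(pool);             /* all allocations must be freed first */
        return 0;
    }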
/linux/drivers/gpu/drm/panthor/ |
panthor_heap.c
    80: * The pool is attached to a panthor_file and can't be shared across processes.
    89: /** @vm: VM this pool is bound to. */
   101: /** @size: Size of all chunks across all heaps in the pool. */
   113: static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
   115:         return panthor_heap_ctx_stride(pool->ptdev) * id;
   118: static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
   120:         return pool->gpu_contexts->kmap +
   121:                panthor_get_heap_ctx_offset(pool, id);
   124: static void panthor_free_heap_chunk(struct panthor_heap_pool *pool, ...)
   133:         atomic_sub(heap->chunk_size, &pool->...
    Also defined here: panthor_alloc_heap_chunk(), panthor_free_heap_chunks(),
    panthor_alloc_heap_chunks(), panthor_heap_destroy_locked(), panthor_heap_destroy(),
    panthor_heap_create(), panthor_heap_return_chunk(), panthor_heap_grow(),
    panthor_heap_pool_release(), panthor_heap_pool_put(), panthor_heap_pool_get(),
    panthor_heap_pool_create(), panthor_heap_pool_destroy(), panthor_heap_pool_size().
/linux/kernel/cgroup/ |
dmem.c
   106: static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
   108:         list_del(&pool->region_node);
   109:         kfree(pool);
   113: set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
   115:         page_counter_set_min(&pool->cnt, val);
   119: set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
   121:         page_counter_set_low(&pool->cnt, val);
   125: set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
   127:         page_counter_set_max(&pool->cnt, val);
   130: static u64 get_resource_low(struct dmem_cgroup_pool_state *pool) ...
    Also defined here: get_resource_min(), get_resource_max(), get_resource_current(),
    dmemcs_offline(), dmemcs_free(), find_cg_pool_locked(), pool_parent(),
    dmem_cgroup_calculate_protection(), dmem_cgroup_state_evict_valuable(),
    alloc_pool_single(), get_cg_pool_locked(), dmemcg_free_rcu(),
    dmem_cgroup_unregister_region(), dmem_cgroup_pool_state_put(),
    get_cg_pool_unlocked(), dmem_cgroup_uncharge(), dmem_cgroup_try_charge(),
    dmemcg_limit_write(), dmemcg_limit_show().
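Consumers of this controller charge device-memory allocations against the caller's cgroup and uncharge on free. A sketch under the assumption that try_charge takes the region, size, and two pool-state out-pointers; only dmem_cgroup_uncharge()'s signature is visible above:

    #include <linux/cgroup_dmem.h>

    /* Hypothetical driver path: charge 'size' bytes of device memory to
     * the current task's dmem cgroup; keep the returned pool state so the
     * free path can uncharge the same pool. */
    static int drv_mem_charge(struct dmem_cgroup_region *region, u64 size,
                              struct dmem_cgroup_pool_state **pool_out)
    {
        struct dmem_cgroup_pool_state *limit_pool = NULL;

        /* On failure, limit_pool (when set) reports which cgroup hit its
         * limit, so the driver can try eviction -- assumed semantics. */
        return dmem_cgroup_try_charge(region, size, pool_out, &limit_pool);
    }

    static void drv_mem_free(struct dmem_cgroup_pool_state *pool, u64 size)
    {
        dmem_cgroup_uncharge(pool, size);
    }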
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/ |
dr_icm_pool.c
    24: struct mutex mutex;  /* protect the ICM pool and ICM buddy */
    82: u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
    94: u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
   102:         chunk->buddy_mem->pool->icm_type);
   111: dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
   113:         struct mlx5_core_dev *mdev = pool->dmn->mdev;
   123:         icm_mr->dmn = pool->dmn;
   125:         icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
   126:                                                                pool->icm_type);
   128:         switch (pool->...
    Also defined here: dr_icm_buddy_create(), dr_icm_chunk_init(),
    dr_icm_pool_is_sync_required(), dr_icm_pool_clear_hot_chunks_arr(),
    dr_icm_pool_sync_all_buddy_pools(), dr_icm_handle_buddies_get_mem(),
    mlx5dr_icm_alloc_chunk(), mlx5dr_icm_free_chunk(), mlx5dr_icm_pool_alloc_htbl(),
    mlx5dr_icm_pool_free_htbl(), mlx5dr_icm_pool_create(), mlx5dr_icm_pool_destroy().
dr_arg.c
     8: /* modify-header arg pool */
    18: /* argument pool area */
    23: struct mutex mutex;  /* protect arg pool */
    31: static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
    43:         pool->dmn->info.caps.log_header_modify_argument_granularity;
    46:         max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity, ...
    49:         min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc, ...
    52:         if (pool->log_chunk_size > object_range) {
    53:                 mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
    54:                            pool->...
    Also defined here: dr_arg_pool_get_arg_obj(), dr_arg_pool_put_arg_obj(),
    dr_arg_pool_create(), dr_arg_pool_destroy().
/linux/include/net/page_pool/ |
helpers.h
    24: * allocated from page pool. There is no cache line dirtying for 'struct page'
    25: * when a page is recycled back to the page pool.
    29: * page allocated from page pool. Page splitting enables memory saving and thus ...
    48: * the same page when a page is split. The API user must setup pool->p.max_len
    49: * and pool->p.offset correctly and ensure that page_pool_put_page() is called ...
    67: bool page_pool_get_stats(const struct page_pool *pool, ...);
    88: * @pool: pool from which to allocate
    92: static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
    96:         return page_pool_alloc_pages(pool, gf...
    Also defined here: page_pool_dev_alloc_frag(), page_pool_alloc_netmem(),
    page_pool_dev_alloc_netmem(), page_pool_dev_alloc_netmems(), page_pool_alloc(),
    page_pool_dev_alloc(), page_pool_alloc_va(), page_pool_dev_alloc_va(),
    page_pool_get_dma_dir(), page_pool_put_netmem(), page_pool_put_page(),
    page_pool_put_full_netmem(), page_pool_put_full_page(),
    page_pool_recycle_direct(), page_pool_recycle_direct_netmem(),
    page_pool_free_va(), __page_pool_dma_sync_for_cpu(),
    page_pool_dma_sync_for_cpu(), page_pool_dma_sync_netmem_for_cpu(),
    page_pool_put(), page_pool_nid_changed(), page_pool_is_unreadable().
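The basic driver pattern pairs a dev_alloc with a put. A minimal RX-path sketch; the surrounding driver context is an assumption, the calls are from the header above:

    #include <net/page_pool/helpers.h>

    /* Hypothetical RX path: take a page from the pool, and recycle it
     * back instead of calling put_page() when the frame is dropped. */
    static void drv_rx_once(struct page_pool *pool)
    {
        struct page *page;

        page = page_pool_dev_alloc_pages(pool); /* GFP_ATOMIC under the hood */
        if (!page)
            return;

        /* ...DMA the frame into the page, hand it to the stack, or: */
        page_pool_put_full_page(pool, page, true /* allow_direct: in NAPI */);
    }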
/linux/drivers/net/ethernet/mellanox/mlx5/core/ |
irq_affinity.c
     8: static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
    10:         pool->irqs_per_cpu[cpu]--;
    13: static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
    15:         pool->irqs_per_cpu[cpu]++;
    19: static int cpu_get_least_loaded(struct mlx5_irq_pool *pool, ...)
    27:         if (!pool->irqs_per_cpu[cpu]) {
    33:         if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
    38:         mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
    42:         pool->...
    Also defined here: irq_pool_request_irq(), irq_pool_find_least_loaded(),
    mlx5_irq_affinity_request(), mlx5_irq_affinity_irq_release().
/linux/drivers/gpu/drm/amd/display/dc/resource/dce80/ |
dce80_resource.c
   803: static void dce80_resource_destruct(struct dce110_resource_pool *pool)
   807:         for (i = 0; i < pool->base.pipe_count; i++) {
   808:                 if (pool->base.opps[i] != NULL)
   809:                         dce110_opp_destroy(&pool->base.opps[i]);
   811:                 if (pool->base.transforms[i] != NULL)
   812:                         dce80_transform_destroy(&pool->base.transforms[i]);
   814:                 if (pool->base.ipps[i] != NULL)
   815:                         dce_ipp_destroy(&pool->base.ipps[i]);
   817:                 if (pool->base.mis[i] != NULL) {
   818:                         kfree(TO_DCE_MEM_INPUT(pool->...
    Also defined here: dce80_destroy_resource_pool(), dce80_construct(),
    dce80_create_resource_pool(), dce81_construct(), dce81_create_resource_pool(),
    dce83_construct(), dce83_create_resource_pool().
/linux/drivers/gpu/drm/amd/display/dc/resource/dce60/ |
dce60_resource.c
   797: static void dce60_resource_destruct(struct dce110_resource_pool *pool)
   801:         for (i = 0; i < pool->base.pipe_count; i++) {
   802:                 if (pool->base.opps[i] != NULL)
   803:                         dce110_opp_destroy(&pool->base.opps[i]);
   805:                 if (pool->base.transforms[i] != NULL)
   806:                         dce60_transform_destroy(&pool->base.transforms[i]);
   808:                 if (pool->base.ipps[i] != NULL)
   809:                         dce_ipp_destroy(&pool->base.ipps[i]);
   811:                 if (pool->base.mis[i] != NULL) {
   812:                         kfree(TO_DCE_MEM_INPUT(pool->...
    Also defined here: dce60_destroy_resource_pool(), dce60_construct(),
    dce60_create_resource_pool(), dce61_construct(), dce61_create_resource_pool(),
    dce64_construct(), dce64_create_resource_pool().
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
crypto.c
    13: * (for example, TLS) after last revalidation in a pool or a bulk.
    19: #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
    33: int num_deks;    /* the total number of keys in this pool */
    34: int avail_deks;  /* the number of available keys in this pool */
    35: int in_use_deks; /* the number of keys currently in use in this pool */
   288: mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
   290:         struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
   291:         struct mlx5_core_dev *mdev = pool->mdev;
   313:         err = mlx5_crypto_create_dek_bulk(mdev, pool->...
    Also defined here: mlx5_crypto_dek_pool_add_bulk(), mlx5_crypto_dek_pool_remove_bulk(),
    mlx5_crypto_dek_pool_pop(), mlx5_crypto_dek_need_sync(),
    mlx5_crypto_dek_free_locked(), mlx5_crypto_dek_pool_push(),
    mlx5_crypto_dek_bulk_reset_synced(), mlx5_crypto_dek_bulk_handle_avail(),
    mlx5_crypto_dek_pool_splice_destroy_list(), mlx5_crypto_dek_pool_free_wait_keys(),
    mlx5_crypto_dek_pool_reset_synced(), mlx5_crypto_dek_sync_work_fn(),
    mlx5_crypto_dek_destroy_work_fn(), mlx5_crypto_dek_pool_create(),
    mlx5_crypto_dek_pool_destroy().
/linux/arch/arm64/kvm/hyp/nvhe/ |
page_alloc.c
    16: * Example buddy-tree for a 4-pages physically contiguous pool:
    27: * Example of requests on this pool:
    28: *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
    29: *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
    30: *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
    31: *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
    33: static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, ...)
    42:         /* Don't return a page outside the pool range -- it belongs to
    45:         if (addr < pool->range_start || addr >= pool->...
    Also defined here: __find_buddy_avail(), __hyp_attach_page(), __hyp_extract_page(),
    __hyp_put_page(), hyp_put_page(), hyp_get_page(), hyp_alloc_pages(),
    hyp_pool_init().
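The buddy of a block is found by flipping a single address bit, which is exactly why the comment above pairs page 0 with page 1 at order 0 but with page 2 at order 1. A standalone sketch of the computation; the real __find_buddy_nocheck() additionally rejects addresses outside the pool range:

    #include <stdint.h>

    #define EX_PAGE_SHIFT 12
    #define EX_PAGE_SIZE  (1ULL << EX_PAGE_SHIFT)

    /* Buddy address = block address with bit (PAGE_SHIFT + order) flipped. */
    static uint64_t buddy_addr(uint64_t addr, uint8_t order)
    {
        return addr ^ (EX_PAGE_SIZE << order);
    }

    /* buddy_addr(0x0000, 0) == 0x1000  (page 0 <-> page 1)
     * buddy_addr(0x0000, 1) == 0x2000  (page 0 <-> page 2)
     * buddy_addr(0x2000, 0) == 0x3000  (page 2 <-> page 3) */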
/linux/net/rds/ |
ib_rdma.c
   194: struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
   200:         spin_lock_irqsave(&pool->clean_lock, flags);
   201:         ret = llist_del_first(&pool->clean_list);
   202:         spin_unlock_irqrestore(&pool->clean_lock, flags);
   205:         if (pool->pool_type == RDS_IB_MR_8K_POOL)
   271: struct rds_ib_mr_pool *pool = ibmr->pool;    /* in rds_ib_teardown_mr() */
   273:         atomic_sub(pinned, &pool->free_pinned);
   277: static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
   281:         item_count = atomic_read(&pool->...
    Also defined here: rds_ib_flush_mr_pool(), rds_ib_try_reuse_ibmr(),
    rds_ib_mr_pool_flush_worker(), rds_ib_free_mr(), rds_ib_destroy_mr_pool(),
    rds_ib_create_mr_pool().
/linux/include/net/ |
xsk_buff_pool.h
    30: struct xsk_buff_pool *pool;  /* struct member */
    66: /* For performance reasons, each buff pool has its own array of dma_pages ... */
   106: int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, ...);
   108: int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, ...);
   110: int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
   111: void xp_destroy(struct xsk_buff_pool *pool);
   112: void xp_get_pool(struct xsk_buff_pool *pool);
   113: bool xp_put_pool(struct xsk_buff_pool *pool);
   114: void xp_clear_dev(struct xsk_buff_pool *pool);
   115: void xp_add_xsk(struct xsk_buff_pool *pool, struct ...
    Also defined here: xp_init_xskb_addr(), xp_init_xskb_dma(), xp_dma_sync_for_device(),
    xp_desc_crosses_non_contig_pg(), xp_aligned_extract_addr(),
    xp_aligned_extract_idx(), xp_get_handle(), xp_tx_metadata_enabled().
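In aligned mode every chunk is a power-of-two size, so xp_aligned_extract_addr()/xp_aligned_extract_idx() reduce to mask-and-shift arithmetic. A standalone illustration of that math; the in-kernel helpers use a precomputed pool->chunk_mask, and the names here are local:

    #include <stdint.h>

    /* Base address of the chunk containing 'addr' (chunk_size power of two). */
    static uint64_t chunk_base(uint64_t addr, uint64_t chunk_size)
    {
        return addr & ~(chunk_size - 1); /* e.g. 0x5234 -> 0x4000 for 16 KiB chunks */
    }

    /* Index of that chunk within the umem. */
    static uint64_t chunk_idx(uint64_t addr, uint64_t chunk_size)
    {
        return addr / chunk_size;        /* a shift for power-of-two sizes */
    }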
/linux/drivers/net/ethernet/mellanox/mlxsw/ |
spectrum_cnt.c
    24: spinlock_t counter_pool_lock;  /* Protects counter pool allocations */
    54: struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;  /* in mlxsw_sp_counter_sub_pools_init() */
    62:         for (i = 0; i < pool->sub_pools_count; i++) {
    63:                 sub_pool = &pool->sub_pools[i];
    89:         sub_pool = &pool->sub_pools[i];
    99: struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;  /* in mlxsw_sp_counter_sub_pools_fini() */
   104:         for (i = 0; i < pool->sub_pools_count; i++) {
   105:                 sub_pool = &pool->sub_pools[i];
   115: const struct mlxsw_sp_counter_pool *pool = priv;  /* in mlxsw_sp_counter_pool_occ_get() */
   117:         return atomic_read(&pool->...
    Also defined here: mlxsw_sp_counter_pool_init(), mlxsw_sp_counter_pool_fini(),
    mlxsw_sp_counter_alloc(), mlxsw_sp_counter_free().
/linux/include/linux/ |
genalloc.h
    16: * available. If new memory is added to the pool a lock has to be ...
    46: * @pool: the pool being allocated from
    52: typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr);
    56: /* General purpose special memory pool descriptor. */
    60:         struct list_head chunks;        /* list of chunks in this pool */
    70: /* General purpose special memory pool chunk descriptor. */
    73:         struct list_head next_chunk;    /* next chunk in pool */
    97: extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
   101: static inline int gen_pool_add_virt(struct gen_pool *pool, unsigne...
    Also defined here: gen_pool_add(), gen_pool_alloc_owner(), gen_pool_alloc_algo(),
    gen_pool_alloc(), gen_pool_free().
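The genpool_algo_t hook is how callers swap placement policies. A short sketch selecting aligned first-fit via gen_pool_set_algo(); the 256-byte alignment is an arbitrary example:

    #include <linux/genalloc.h>

    /* Make future allocations from this pool 256-byte aligned. */
    static void example_set_algo(struct gen_pool *pool)
    {
        static struct genpool_data_align align_data = { .align = 256 };

        gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
    }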
/linux/drivers/gpu/drm/i915/gt/ |
intel_gt_buffer_pool.c
    14: static struct list_head *bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
    24:         if (n >= ARRAY_SIZE(pool->cache_list))
    25:                 n = ARRAY_SIZE(pool->cache_list) - 1;
    27:         return &pool->cache_list[n];
    37: static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
    44:         for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
    45:                 struct list_head *list = &pool->cache_list[n];
    50:                 if (spin_trylock_irq(&pool->lock)) {
    73:                 spin_unlock_irq(&pool->lock);
    89: struct intel_gt_buffer_pool *pool ...    /* in pool_free_work() */
    Also defined here: pool_retire(), node_create(), intel_gt_get_buffer_pool(),
    intel_gt_init_buffer_pool(), intel_gt_flush_buffer_pool(),
    intel_gt_fini_buffer_pool().
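bucket_for_size() classifies a request into one of a few power-of-two page-count buckets, clamping oversized requests into the last one. A standalone sketch of that classification; the bucket count here is illustrative, not i915's:

    #include <stddef.h>

    #define EX_PAGE_SHIFT  12
    #define EX_NUM_BUCKETS 4   /* illustrative: 1, 2, 4, 8+ pages */

    /* Bucket index = floor(log2(size in pages)), clamped to the last bucket. */
    static int bucket_index(size_t sz)
    {
        size_t pages = sz >> EX_PAGE_SHIFT;
        int n = 0;

        while (pages > 1) {
            pages >>= 1;
            n++;
        }
        return n < EX_NUM_BUCKETS ? n : EX_NUM_BUCKETS - 1;
    }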
/linux/drivers/gpu/drm/ttm/tests/ |
ttm_pool_test.c
    79: struct ttm_pool *pool;       /* in ttm_pool_pre_populated() */
    86: pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
    87: KUNIT_ASSERT_NOT_NULL(test, pool);
    89: ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
    91: err = ttm_pool_alloc(pool, tt, &simple_ctx);
    94: ttm_pool_free(pool, tt);
    97: return pool;
   140: struct ttm_pool *pool;       /* in ttm_pool_alloc_basic() */
   150: pool ...
    Also defined here: ttm_pool_alloc_basic_dma_addr(), ttm_pool_alloc_order_caching_match(),
    ttm_pool_alloc_caching_mismatch(), ttm_pool_alloc_order_mismatch(),
    ttm_pool_free_dma_alloc(), ttm_pool_free_no_dma_alloc(), ttm_pool_fini_basic().
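Every test above runs the same init -> alloc -> free -> fini cycle. A condensed sketch assembled from the calls in the listing; the tt and ctx arguments are assumed to come from the test fixtures:

    #include <kunit/test.h>
    #include <drm/ttm/ttm_pool.h>
    #include <drm/ttm/ttm_tt.h>

    static void example_pool_cycle(struct kunit *test, struct device *dev,
                                   struct ttm_tt *tt,
                                   struct ttm_operation_ctx *ctx)
    {
        struct ttm_pool *pool;
        int err;

        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);

        /* use_dma_alloc = true, use_dma32 = false, as in the excerpt */
        ttm_pool_init(pool, dev, NUMA_NO_NODE, true, false);

        err = ttm_pool_alloc(pool, tt, ctx);   /* backs tt with pages */
        KUNIT_ASSERT_EQ(test, err, 0);

        ttm_pool_free(pool, tt);
        ttm_pool_fini(pool);                   /* releases cached pages */
    }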