/linux/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
     28  def __init__(self, pools):
     30  for pool in pools:
     72  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    110  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    187  pools = PoolList()
    190  pools.append(Pool(pooldict))
    191  return pools
    194  def do_check_pools(dlname, pools, vp):
    195  for pool in pools:
    219  def check_pools(dlname, pools) ...
    290  check_tcbind(dlname, ports, pools)
    357  check_portpool(dlname, ports, pools)
    ... (more matches not shown)
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
     28  struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
    201  arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);  in mlx5dr_arg_get_obj()
    226  dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);  in mlx5dr_arg_put_obj()
    245  pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);  in mlx5dr_arg_mgr_create()
    246  if (!pool_mgr->pools[i])  in mlx5dr_arg_mgr_create()
    254  dr_arg_pool_destroy(pool_mgr->pools[i]);  in mlx5dr_arg_mgr_create()
    262  struct dr_arg_pool **pools;  in mlx5dr_arg_mgr_destroy()
    268  pools = mgr->pools;  in mlx5dr_arg_mgr_destroy()
    270  dr_arg_pool_destroy(pools[ ...  in mlx5dr_arg_mgr_destroy()
    ... (more matches not shown)
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
     25  if (!xsk->pools) {  in mlx5e_xsk_get_pools()
     26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,  in mlx5e_xsk_get_pools()
     27  sizeof(*xsk->pools), GFP_KERNEL);  in mlx5e_xsk_get_pools()
     28  if (unlikely(!xsk->pools))  in mlx5e_xsk_get_pools()
     41  kfree(xsk->pools);  in mlx5e_xsk_put_pools()
     42  xsk->pools = NULL;  in mlx5e_xsk_put_pools()
     54  xsk->pools[ix] = pool;  in mlx5e_xsk_add_pool()
     60  xsk->pools[ix] = NULL;  in mlx5e_xsk_remove_pool()

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
     12  if (!xsk || !xsk->pools)  in mlx5e_xsk_get_pool()
     18  return xsk->pools[ix];  in mlx5e_xsk_get_pool()
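The pool.c/pool.h hits outline the pattern these files implement: a per-channel array of XSK buffer pool pointers that is allocated lazily on first use (a kcalloc of MLX5E_MAX_NUM_CHANNELS entries), indexed by channel number, and treated as "no XSK configured" while it is still NULL. A minimal sketch of that pattern follows; it is not the driver's code, and MAX_CHANNELS, the xsk_registry name and the missing refcounting/locking are simplifications for illustration.

    /* Sketch of a lazily allocated per-channel pool registry, modelled on
     * the mlx5e XSK hits above. Refcounting of the array and locking are
     * omitted for brevity.
     */
    #include <linux/slab.h>
    #include <linux/errno.h>

    #define MAX_CHANNELS 128		/* stands in for MLX5E_MAX_NUM_CHANNELS */

    struct xsk_buff_pool;		/* opaque here */

    struct xsk_registry {
    	struct xsk_buff_pool **pools;	/* NULL until the first pool is added */
    };

    static int xsk_registry_add(struct xsk_registry *xsk, unsigned int ix,
    				struct xsk_buff_pool *pool)
    {
    	if (!xsk->pools) {
    		xsk->pools = kcalloc(MAX_CHANNELS, sizeof(*xsk->pools),
    				     GFP_KERNEL);
    		if (unlikely(!xsk->pools))
    			return -ENOMEM;
    	}
    	xsk->pools[ix] = pool;
    	return 0;
    }

    static struct xsk_buff_pool *xsk_registry_get(struct xsk_registry *xsk,
    					      unsigned int ix)
    {
    	/* A NULL array simply means "no XSK pools configured". */
    	if (!xsk || !xsk->pools || ix >= MAX_CHANNELS)
    		return NULL;
    	return xsk->pools[ix];
    }

    static void xsk_registry_remove(struct xsk_registry *xsk, unsigned int ix)
    {
    	xsk->pools[ix] = NULL;
    }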
/linux/arch/sparc/kernel/iommu-common.c
     82  spin_lock_init(&(iommu->pools[i].lock));  in iommu_tbl_pool_init()
     83  iommu->pools[i].start = start;  in iommu_tbl_pool_init()
     84  iommu->pools[i].hint = start;  in iommu_tbl_pool_init()
     86  iommu->pools[i].end = start - 1;  in iommu_tbl_pool_init()
    131  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    161  pool = &(iommu->pools[0]);  in iommu_tbl_range_alloc()
    193  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    237  p = &tbl->pools[pool_nr];  in get_pool()
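Together with the iommu-common.h hit further down, these lines show a DMA map table split into IOMMU_NR_POOLS equally sized pools, each with its own spinlock, start/end bounds and a rotating allocation hint, so that concurrent mappings from different CPUs do not all fight over one lock. A rough sketch of such an init routine, with the structures reduced to the fields visible in the hits and IOMMU_NR_POOLS assumed to be 4:

    /* Sketch of splitting one IOMMU map table into per-pool arenas, each
     * with its own lock and hint, as the sparc/powerpc iommu hits suggest.
     */
    #include <linux/spinlock.h>

    #define IOMMU_NR_POOLS 4	/* assumption for the sketch */

    struct iommu_pool {
    	unsigned long start;
    	unsigned long end;
    	unsigned long hint;	/* where the next search begins */
    	spinlock_t lock;
    };

    struct iommu_map_table {
    	unsigned long table_size;	/* total number of entries */
    	struct iommu_pool pools[IOMMU_NR_POOLS];
    };

    static void map_table_pool_init(struct iommu_map_table *iommu)
    {
    	unsigned long pool_size = iommu->table_size / IOMMU_NR_POOLS;
    	unsigned long start = 0;
    	int i;

    	for (i = 0; i < IOMMU_NR_POOLS; i++) {
    		spin_lock_init(&iommu->pools[i].lock);
    		iommu->pools[i].start = start;
    		iommu->pools[i].hint = start;
    		start += pool_size;		/* start of the next pool */
    		iommu->pools[i].end = start - 1;
    	}
    }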
/linux/tools/net/ynl/samples/page-pool.c
     60  struct netdev_page_pool_get_list *pools;  in main()
     76  pools = netdev_page_pool_get_dump(ys);  in main()
     77  if (!pools)  in main()
     80  ynl_dump_foreach(pools, pp) {  in main()
     87  netdev_page_pool_get_list_free(pools);  in main()
    124  printf("page pools: %u (zombies: %u)\n",  in main()
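The sample's hits trace the whole YNL flow: open a netdev-family socket, request a page-pool dump, iterate the returned list, then free it. Below is a cut-down user-space sketch along the same lines; it assumes the YNL C library and the generated netdev-user.h names used by the in-tree samples (netdev_page_pool_get_rsp, ynl_netdev_family), and only prints the pool count.

    /* Minimal page-pool dump over the netdev netlink family, following the
     * calls visible in the page-pool.c sample. Assumes the YNL C library and
     * the generated netdev-user.h, as used by tools/net/ynl/samples.
     */
    #include <stdio.h>

    #include <ynl.h>
    #include "netdev-user.h"

    int main(void)
    {
    	struct netdev_page_pool_get_list *pools;
    	struct netdev_page_pool_get_rsp *pp;
    	struct ynl_error yerr;
    	struct ynl_sock *ys;
    	unsigned int count = 0;

    	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
    	if (!ys) {
    		fprintf(stderr, "YNL: %s\n", yerr.msg);
    		return 1;
    	}

    	pools = netdev_page_pool_get_dump(ys);
    	if (!pools) {
    		fprintf(stderr, "YNL: %s\n", ys->err.msg);
    		ynl_sock_destroy(ys);
    		return 2;
    	}

    	/* Walk every page pool the kernel reported. */
    	ynl_dump_foreach(pools, pp)
    		count++;
    	netdev_page_pool_get_list_free(pools);

    	printf("page pools: %u\n", count);
    	ynl_sock_destroy(ys);
    	return 0;
    }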
/linux/kernel/cgroup/dmem.c
     36  * @pools: List of pools linked to this region.
     39  struct list_head pools;
     49  * No new pools should be added to the region afterwards.
     57  struct list_head pools;
    163  list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)  in dmemcs_offline()
    174  list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {  in dmemcs_free()
    194  INIT_LIST_HEAD(&dmemcs->pools);  in dmemcs_alloc()
    203  list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))  in find_cg_pool_locked()
    235  list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_nod ...  in dmem_cgroup_calculate_protection()
    ... (more matches not shown)
/linux/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
    348  struct cxgbi_ppm_pool __percpu *pools;  in ppm_alloc_cpu_pool()
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;  in ppm_alloc_cpu_pool()
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;  in ppm_alloc_cpu_pool()
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));  in ppm_alloc_cpu_pool()
    370  if (!pools)  in ppm_alloc_cpu_pool()
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);  in ppm_alloc_cpu_pool()
    384  return pools;  in ppm_alloc_cpu_pool()
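These hits show a per-CPU pool whose bitmap is carved out of the same percpu allocation: the object size is sizeof(*pools) plus the bitmap words, allocated with __alloc_percpu(), after which each CPU's instance is initialised through per_cpu_ptr(). A stripped-down sketch of that layout, with struct and field names that are illustrative rather than the cxgbi ones:

    /* Sketch of a per-CPU pool with a trailing bitmap in the same percpu
     * allocation, following the libcxgb_ppm hits. Names are illustrative.
     */
    #include <linux/percpu.h>
    #include <linux/spinlock.h>
    #include <linux/bitmap.h>

    struct pcpu_pool {
    	spinlock_t lock;
    	unsigned int next;
    	unsigned long bmap[];		/* flexible array: one bit per entry */
    };

    static struct pcpu_pool __percpu *pcpu_pool_alloc(unsigned int entries_per_cpu)
    {
    	unsigned int nr_longs = BITS_TO_LONGS(entries_per_cpu);
    	size_t alloc_sz = sizeof(struct pcpu_pool) +
    			  sizeof(unsigned long) * nr_longs;
    	struct pcpu_pool __percpu *pools;
    	unsigned int cpu;

    	/* One pool (header + bitmap) per possible CPU. */
    	pools = __alloc_percpu(alloc_sz, __alignof__(struct pcpu_pool));
    	if (!pools)
    		return NULL;

    	for_each_possible_cpu(cpu) {
    		struct pcpu_pool *ppool = per_cpu_ptr(pools, cpu);

    		spin_lock_init(&ppool->lock);
    		ppool->next = 0;
    		bitmap_zero(ppool->bmap, entries_per_cpu);
    	}

    	return pools;
    }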
/linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
    162  pools_params->pools[curr_bp].priority_mask |= (1 << j);  in dpaa2_xsk_set_bp_per_qdbin()
    163  if (!pools_params->pools[curr_bp].priority_mask)  in dpaa2_xsk_set_bp_per_qdbin()
    166  pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;  in dpaa2_xsk_set_bp_per_qdbin()
    167  pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;  in dpaa2_xsk_set_bp_per_qdbin()
    168  pools_params->pools[curr_bp++].backup_pool = 0;  in dpaa2_xsk_set_bp_per_qdbin()
/linux/drivers/soc/ti/knav_qmss.h
    191  * @pools: list of descriptor pools in the region
    203  struct list_head pools;
    207  * struct knav_pool: qmss pools
    304  struct list_head pools;
    363  list_for_each_entry(pool, &kdev->pools, list)

/linux/drivers/soc/ti/knav_qmss_queue.c
    812  /* Region maintains a sorted (by region offset) list of pools  in knav_pool_create()
    817  node = &region->pools;  in knav_pool_create()
    818  list_for_each_entry(iter, &region->pools, region_inst) {  in knav_pool_create()
    832  list_add_tail(&pool->list, &kdev->pools);  in knav_pool_create()
   1034  list_add(&pool->region_inst, &region->pools);  in knav_queue_setup_region()
   1119  INIT_LIST_HEAD(&region->pools);  in knav_queue_setup_regions()
   1309  of_get_child_by_name(node, "queue-pools");  in knav_setup_queue_pools()
   1314  "queue-pools not specified\n");  in knav_setup_queue_pools()
   1361  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)  in knav_queue_free_regions()
   1808  INIT_LIST_HEAD(&kdev->pools);  in knav_queue_probe()
    ... (more matches not shown)
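The comment at line 812 and the surrounding hits describe how knav_pool_create() keeps each region's pool list sorted by region offset: it walks region->pools to find the first pool that starts after the new one and inserts before it, falling back to the list head (the tail position) when none is found. A small sketch of that sorted insert, with the types reduced to the two fields the insert needs:

    /* Sorted insert into a region's pool list, keyed by region_offset, in
     * the style suggested by the knav_qmss_queue.c hits. Minimal types.
     */
    #include <linux/list.h>

    struct knav_region_sketch {
    	struct list_head pools;		/* sorted by region_offset */
    };

    struct knav_pool_sketch {
    	unsigned int region_offset;
    	struct list_head region_inst;	/* link in region->pools */
    };

    static void pool_insert_sorted(struct knav_region_sketch *region,
    			       struct knav_pool_sketch *pool)
    {
    	struct list_head *node = &region->pools;
    	struct knav_pool_sketch *iter;

    	/* Find the first pool that starts after the new one ... */
    	list_for_each_entry(iter, &region->pools, region_inst) {
    		if (iter->region_offset > pool->region_offset) {
    			node = &iter->region_inst;
    			break;
    		}
    	}
    	/* ... and link the new pool just before it (or at the tail). */
    	list_add_tail(&pool->region_inst, node);
    }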
/linux/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
      6  processors(PDSP), linking RAM, descriptor pools and infrastructure
     49  - queue-pools : child node classifying the queue ranges into pools.
     50  Queue ranges are grouped into 3 type of pools:
    151  queue-pools {
/linux/arch/sparc/include/asm/iommu-common.h
     26  struct iommu_pool pools[IOMMU_NR_POOLS];
/linux/Documentation/networking/page_pool.rst
     46  Information about page pools on the system can be accessed via the netdev
     51  The number of pools created **must** match the number of hardware queues
    106  with fragmented page pools.
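Line 51 of page_pool.rst states the rule a driver has to follow: one page pool per hardware Rx queue. A hedged sketch of what that per-queue creation could look like with the page_pool_create() API is shown below; the parameter values and the helper name are placeholders a real driver would replace with its own ring sizes and DMA setup.

    /* One page pool per hardware Rx queue, as page_pool.rst line 51
     * requires. Parameter values are placeholders, not driver defaults.
     */
    #include <net/page_pool/helpers.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int create_rx_page_pools(struct device *dev,
    				struct page_pool **pools,
    				unsigned int num_rx_queues)
    {
    	unsigned int i;

    	for (i = 0; i < num_rx_queues; i++) {
    		struct page_pool_params pp_params = {
    			.flags		= PP_FLAG_DMA_MAP,
    			.order		= 0,
    			.pool_size	= 256,		/* typically the ring size */
    			.nid		= NUMA_NO_NODE,
    			.dev		= dev,
    			.dma_dir	= DMA_FROM_DEVICE,
    		};

    		pools[i] = page_pool_create(&pp_params);
    		if (IS_ERR(pools[i])) {
    			int err = PTR_ERR(pools[i]);

    			/* Unwind the pools created so far. */
    			while (i--)
    				page_pool_destroy(pools[i]);
    			return err;
    		}
    	}
    	return 0;
    }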
/linux/Documentation/core-api/swiotlb.rst
    142  as one or more "pools". The default pool is allocated during system boot with a
    146  CONFIG_SWIOTLB_DYNAMIC is enabled, additional pools may be allocated later in
    224  New pools added via dynamic swiotlb are linked together in a linear list.
    227  large number of dynamic pools. The data structures could be improved for
    232  not wasted, with dynamic pools making more space available if needed (as long
    239  which includes the default memory pool and any dynamic or transient pools
    307  Restricted pools
    309  The swiotlb machinery is also used for "restricted pools", which are pools of
    311  use by a particular device. Restricted pools provid ...
    ... (more matches not shown)
/linux/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
     26  - buffer pools
     69  DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124  The role of hardware buffer pools is storage of ingress frame data. Each network
/linux/Documentation/arch/arm/keystone/knav-qmss.rst
     12  processors(PDSP), linking RAM, descriptor pools and infrastructure
     25  allocate descriptor pools, map the descriptors, push/pop to queues etc. For
/linux/drivers/net/ethernet/wangxun/libwx/wx_hw.c
    751  * @pools: VMDq "set" or "pool" index
    756  static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,  in wx_set_rar()
    772  wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);  in wx_set_rar()
    775  wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);  in wx_set_rar()
    950  wx->mac_table[i].pools,  in wx_sync_mac_table()
    968  wx->mac_table[i].pools,  in wx_full_sync_mac_table()
    981  wx->mac_table[0].pools = BIT(VMDQ_P(0));  in wx_mac_set_default_filter()
    984  wx->mac_table[0].pools,  in wx_mac_set_default_filter()
   1000  wx->mac_table[i].pools = 0;  in wx_flush_sw_mac_table()
   1016  if (wx->mac_table[i].pools ! ...  in wx_add_mac_filter()
    ... (more matches not shown)
/linux/kernel/dma/swiotlb.c
     91  .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
    310  list_add_rcu(&pool->node, &mem->pools);  in add_mem_pool()
    618  * Allocate from the atomic pools if memory is encrypted and  in swiotlb_alloc_tlb()
    783  list_for_each_entry_rcu(pool, &mem->pools, node) {  in __swiotlb_find_pool()
   1126  * swiotlb_search_area() - search one memory area in all pools
   1136  * Search one memory area in all pools for a sequence of slots that match the
   1151  list_for_each_entry_rcu(pool, &mem->pools, node) {  in swiotlb_search_area()
   1241  * Second, the load from mem->pools must be ordered before the same  in swiotlb_find_slots()
   1332  list_for_each_entry_rcu(pool, &mem->pools, nod ...  in mem_used()
    ... (more matches not shown)
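The swiotlb.c hits, together with the swiotlb.rst text above, show how dynamic pools hang off the io_tlb_mem descriptor on an RCU-protected linear list, and how lookups such as __swiotlb_find_pool() walk that list until they find the pool whose address range contains the buffer. A simplified sketch of that linear search, with the structures cut down to the fields the walk needs:

    /* Linear, RCU-protected search for the pool containing a physical
     * address, in the style of the swiotlb hits. In the real code the
     * pool's lifetime past the RCU section is guaranteed by the caller.
     */
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct tlb_pool {
    	phys_addr_t start;	/* first byte covered by this pool */
    	phys_addr_t end;	/* one past the last byte */
    	struct list_head node;	/* link in io_tlb_mem_sketch::pools */
    };

    struct io_tlb_mem_sketch {
    	struct list_head pools;	/* default pool + any dynamic pools */
    };

    static struct tlb_pool *find_pool(struct io_tlb_mem_sketch *mem,
    				  phys_addr_t paddr)
    {
    	struct tlb_pool *pool;

    	rcu_read_lock();
    	list_for_each_entry_rcu(pool, &mem->pools, node) {
    		if (paddr >= pool->start && paddr < pool->end)
    			goto out;
    	}
    	pool = NULL;	/* address is not in any pool */
    out:
    	rcu_read_unlock();
    	return pool;
    }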
/linux/drivers/md/dm-table.c
   1058  struct dm_md_mempools *pools;  in dm_table_alloc_md_mempools()
   1066  pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);  in dm_table_alloc_md_mempools()
   1067  if (!pools)  in dm_table_alloc_md_mempools()
   1091  if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))  in dm_table_alloc_md_mempools()
   1094  if (bioset_init(&pools->bs, pool_size, front_pad, 0))  in dm_table_alloc_md_mempools()
   1097  t->mempools = pools;  in dm_table_alloc_md_mempools()
   1101  dm_free_md_mempools(pools);  in dm_table_alloc_md_mempools()
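As seen in these hits, dm_table_alloc_md_mempools() allocates a small wrapper on the table's NUMA node and then initialises two bio sets inside it, unwinding through an error path if either bioset_init() fails. A reduced sketch of the same allocate-two-biosets-or-free pattern; the wrapper struct name and the pool sizes here are placeholders, not DM's values.

    /* Allocate a wrapper holding two bio sets, with unwind on failure, in
     * the style of dm_table_alloc_md_mempools(). Sizes are placeholders.
     */
    #include <linux/bio.h>
    #include <linux/slab.h>

    struct md_mempools_sketch {
    	struct bio_set bs;	/* for cloned bios */
    	struct bio_set io_bs;	/* for per-io bookkeeping */
    };

    static struct md_mempools_sketch *alloc_md_mempools(int numa_node)
    {
    	unsigned int pool_size = 256;		/* placeholder */
    	unsigned int io_front_pad = 0;		/* placeholder */
    	unsigned int front_pad = 0;		/* placeholder */
    	struct md_mempools_sketch *pools;

    	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, numa_node);
    	if (!pools)
    		return NULL;

    	if (bioset_init(&pools->io_bs, pool_size, io_front_pad, 0))
    		goto out_free;
    	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
    		goto out_exit_io_bs;

    	return pools;

    out_exit_io_bs:
    	bioset_exit(&pools->io_bs);
    out_free:
    	kfree(pools);
    	return NULL;
    }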
/linux/drivers/nvme/host/pci.c
    434  struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];  in nvme_setup_descriptor_pools()
    437  if (pools->small)  in nvme_setup_descriptor_pools()
    438  return pools; /* already initialized */  in nvme_setup_descriptor_pools()
    440  pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,  in nvme_setup_descriptor_pools()
    442  if (!pools->large)  in nvme_setup_descriptor_pools()
    448  pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,  in nvme_setup_descriptor_pools()
    450  if (!pools->small) {  in nvme_setup_descriptor_pools()
    451  dma_pool_destroy(pools->large);  in nvme_setup_descriptor_pools()
    452  pools->large = NULL;  in nvme_setup_descriptor_pools()
    456  return pools;  in nvme_setup_descriptor_pools()
    464  struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];  in nvme_release_descriptor_pools()
    476  struct nvme_descriptor_pools *pools;  in nvme_init_hctx_common()
    ... (more matches not shown)
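The pci.c hits show NVMe keeping a pair of DMA descriptor pools (small and large) per NUMA node, created lazily the first time a queue on that node needs them and torn down together on release. A hedged sketch of the lazy pair-of-pools setup follows; it uses the long-standing dma_pool_create() API rather than the node-aware variant in the hits, and the sizes and alignments are placeholders, not NVMe's.

    /* Lazily create a small/large pair of DMA pools, as the nvme/pci.c hits
     * suggest. Plain dma_pool_create() is used here; the driver itself uses
     * a node-aware variant. Sizes and alignment are placeholders.
     */
    #include <linux/dmapool.h>
    #include <linux/device.h>
    #include <linux/err.h>

    struct descriptor_pools_sketch {
    	struct dma_pool *large;
    	struct dma_pool *small;
    };

    static struct descriptor_pools_sketch *
    setup_descriptor_pools(struct device *dev,
    		       struct descriptor_pools_sketch *pools)
    {
    	if (pools->small)
    		return pools;	/* already initialized */

    	pools->large = dma_pool_create("descriptor page", dev,
    				       4096, 4096, 0);
    	if (!pools->large)
    		return ERR_PTR(-ENOMEM);

    	pools->small = dma_pool_create("descriptor small", dev,
    				       256, 256, 0);
    	if (!pools->small) {
    		dma_pool_destroy(pools->large);
    		pools->large = NULL;
    		return ERR_PTR(-ENOMEM);
    	}

    	return pools;
    }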
/linux/arch/powerpc/kernel/iommu.c
    102  * The hash is important to spread CPUs across all the pools. For example,
    104  * with 4 pools all primary threads would map to the same pool.
    254  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
    282  pool = &(tbl->pools[0]);  in iommu_range_alloc()
    301  /* Now try scanning all the other pools */  in iommu_range_alloc()
    304  pool = &tbl->pools[pool_nr];  in iommu_range_alloc()
    431  p = &tbl->pools[pool_nr];  in get_pool()
    757  p = &tbl->pools[i];  in iommu_init_table()
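The iommu.c comments explain why the pool index is derived from a hash of the CPU number (so sibling threads do not all land in the same pool), and the hits around lines 282-304 show the fallback: when the chosen pool is exhausted, the allocator scans the remaining pools. A condensed sketch of that selection-plus-fallback loop; the hash choice, the rotation and the bump-style pool_try_alloc() are stand-ins, not the powerpc implementation.

    /* Pick a pool by hashing the CPU, then fall back to scanning the other
     * pools when the preferred one is full, loosely following the powerpc
     * iommu_range_alloc() hits. pool_try_alloc() stands in for the real
     * bitmap search.
     */
    #include <linux/spinlock.h>
    #include <linux/smp.h>
    #include <linux/hash.h>
    #include <linux/log2.h>

    #define NR_POOLS 4	/* assumption for the sketch */

    struct map_pool {
    	spinlock_t lock;
    	unsigned long start, end, hint;	/* end is exclusive here */
    };

    struct map_table {
    	struct map_pool pools[NR_POOLS];
    };

    /* Stand-in bump allocator: the real code searches a bitmap from ->hint. */
    static bool pool_try_alloc(struct map_pool *pool, unsigned long npages,
    			   unsigned long *entry)
    {
    	if (pool->hint + npages > pool->end)
    		return false;
    	*entry = pool->hint;
    	pool->hint += npages;
    	return true;
    }

    static long table_range_alloc(struct map_table *tbl, unsigned long npages)
    {
    	unsigned int pool_nr = hash_32(raw_smp_processor_id(), ilog2(NR_POOLS));
    	unsigned long entry;
    	unsigned int i;

    	/* Try the CPU's preferred pool first, then every other pool. */
    	for (i = 0; i < NR_POOLS; i++) {
    		struct map_pool *pool = &tbl->pools[(pool_nr + i) % NR_POOLS];
    		bool found;

    		spin_lock(&pool->lock);
    		found = pool_try_alloc(pool, npages, &entry);
    		spin_unlock(&pool->lock);
    		if (found)
    			return entry;
    	}
    	return -1;	/* all pools exhausted */
    }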
/linux/arch/arm/boot/dts/ti/keystone/keystone-k2g-netcp.dtsi
     36  queue-pools {
/linux/drivers/soc/fsl/qbman/qman_priv.h
    177  u32 pools;
/linux/include/linux/swiotlb.h
     91  * @nslabs: Total number of IO TLB slabs in all pools.
     95  * @can_grow: %true if more pools can be allocated dynamically.
     98  * @pools: List of IO TLB memory pool descriptors (if dynamic).
    105  * @transient_nslabs: The total number of slots in all transient pools that
    118  struct list_head pools;
    157  * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool().  in swiotlb_find_pool()