/linux-6.8/tools/testing/selftests/drivers/net/mlxsw/ |
D | sharedbuffer_configuration.py |
     28  def __init__(self, pools):  argument
     30  for pool in pools:
     72  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    110  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    187  pools = PoolList()
    190  pools.append(Pool(pooldict))
    191  return pools
    194  def do_check_pools(dlname, pools, vp):  argument
    195  for pool in pools:
    219  def check_pools(dlname, pools):  argument
    [all …]
|
D | qos_pfc.sh |
     68  # iPOOL1+ePOOL5 are overflow pools.
     69  # iPOOL2+ePOOL6 are PFC pools.
    125  # pools
    145  # Control traffic pools. Just reduce the size. Keep them dynamic so that
    150  # Overflow pools.
    154  # PFC pools. As per the writ, the size of egress PFC pool should be
|
/linux-6.8/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_arg.c |
     28  struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];  member
    201  arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);  in mlx5dr_arg_get_obj()
    226  dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);  in mlx5dr_arg_put_obj()
    245  pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);  in mlx5dr_arg_mgr_create()
    246  if (!pool_mgr->pools[i])  in mlx5dr_arg_mgr_create()
    254  dr_arg_pool_destroy(pool_mgr->pools[i]);  in mlx5dr_arg_mgr_create()
    262  struct dr_arg_pool **pools;  in mlx5dr_arg_mgr_destroy()  local
    268  pools = mgr->pools;  in mlx5dr_arg_mgr_destroy()
    270  dr_arg_pool_destroy(pools[i]);  in mlx5dr_arg_mgr_destroy()
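The dr_arg.c hits above show a manager holding one argument pool per log2 chunk size, created in a loop and torn down in reverse when creation fails partway through. A minimal userspace-C sketch of that size-class pattern, with illustrative names (arg_pool, arg_mgr_create) rather than the mlx5dr API:

/* Sketch of a size-class pool manager: one pool per log2 chunk size.
 * All names are illustrative; only the pattern mirrors dr_arg.c. */
#include <stdlib.h>

#define CHUNK_SIZE_MAX 8

struct arg_pool { int log_chunk_size; /* ... free list, lock ... */ };

struct arg_mgr {
        struct arg_pool *pools[CHUNK_SIZE_MAX];
};

static struct arg_pool *arg_pool_create(int log_chunk_size)
{
        struct arg_pool *pool = calloc(1, sizeof(*pool));

        if (pool)
                pool->log_chunk_size = log_chunk_size;
        return pool;
}

static void arg_pool_destroy(struct arg_pool *pool)
{
        free(pool);
}

struct arg_mgr *arg_mgr_create(void)
{
        struct arg_mgr *mgr = calloc(1, sizeof(*mgr));
        int i;

        if (!mgr)
                return NULL;

        for (i = 0; i < CHUNK_SIZE_MAX; i++) {
                mgr->pools[i] = arg_pool_create(i);
                if (!mgr->pools[i])
                        goto free_pools;
        }
        return mgr;

free_pools:
        /* unwind only the pools created so far, newest first */
        while (--i >= 0)
                arg_pool_destroy(mgr->pools[i]);
        free(mgr);
        return NULL;
}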
|
/linux-6.8/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | pool.c |
     25  if (!xsk->pools) {  in mlx5e_xsk_get_pools()
     26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,  in mlx5e_xsk_get_pools()
     27  sizeof(*xsk->pools), GFP_KERNEL);  in mlx5e_xsk_get_pools()
     28  if (unlikely(!xsk->pools))  in mlx5e_xsk_get_pools()
     41  kfree(xsk->pools);  in mlx5e_xsk_put_pools()
     42  xsk->pools = NULL;  in mlx5e_xsk_put_pools()
     54  xsk->pools[ix] = pool;  in mlx5e_xsk_add_pool()
     60  xsk->pools[ix] = NULL;  in mlx5e_xsk_remove_pool()
|
D | pool.h |
     12  if (!xsk || !xsk->pools)  in mlx5e_xsk_get_pool()
     18  return xsk->pools[ix];  in mlx5e_xsk_get_pool()
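pool.c and pool.h above show a lazily allocated, reference-counted array of per-channel XSK buffer-pool pointers, with a NULL check on every lookup. A hedged userspace-C sketch of the same pattern; MAX_NUM_CHANNELS, xsk_state and the helper names are placeholders, not the mlx5e symbols:

/* Lazily allocated per-channel pool table, mirroring the shape of
 * mlx5e_xsk_get_pools()/mlx5e_xsk_get_pool(). Names are illustrative. */
#include <stdlib.h>

#define MAX_NUM_CHANNELS 128

struct xsk_state {
        struct xsk_buff_pool **pools;   /* NULL until the first pool is added */
        unsigned int refcnt;
};

static int xsk_get_pools(struct xsk_state *xsk)
{
        if (!xsk->pools) {
                xsk->pools = calloc(MAX_NUM_CHANNELS, sizeof(*xsk->pools));
                if (!xsk->pools)
                        return -1;
        }
        xsk->refcnt++;
        return 0;
}

static void xsk_put_pools(struct xsk_state *xsk)
{
        if (--xsk->refcnt == 0) {
                free(xsk->pools);
                xsk->pools = NULL;      /* force reallocation on next use */
        }
}

static struct xsk_buff_pool *xsk_get_pool(struct xsk_state *xsk, unsigned int ix)
{
        if (!xsk || !xsk->pools || ix >= MAX_NUM_CHANNELS)
                return NULL;
        return xsk->pools[ix];
}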
|
/linux-6.8/drivers/usb/core/ |
D | buffer.c |
     52  * hcd_buffer_create - initialize buffer pools
     53  * @hcd: the bus whose buffer pools are to be initialized
     58  * memory allocators. It initializes some pools of dma-coherent memory that
     61  * Call hcd_buffer_destroy() to clean up after using those pools.
     90  * hcd_buffer_destroy - deallocate buffer pools
     91  * @hcd: the bus whose buffer pools are to be destroyed
     95  * This frees the buffer pools created by hcd_buffer_create().
|
/linux-6.8/tools/net/ynl/samples/ |
D | page-pool.c |
     60  struct netdev_page_pool_get_list *pools;  in main()  local
     76  pools = netdev_page_pool_get_dump(ys);  in main()
     77  if (!pools)  in main()
     80  ynl_dump_foreach(pools, pp) {  in main()
     87  netdev_page_pool_get_list_free(pools);  in main()
    122  printf("page pools: %u (zombies: %u)\n",  in main()
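The sample above dumps all page pools through the generated netdev YNL bindings. A condensed, hedged sketch of that flow; the header names, the ynl_sock_create()/ynl_sock_destroy() calls and the assumption that ynl_dump_foreach() declares its own iterator follow the in-tree YNL samples and are not confirmed by the hits themselves:

/* Condensed page-pool dump: open a YNL socket for the netdev family,
 * fetch the dump, count the entries, then free everything. */
#include <stdio.h>

#include <ynl.h>
#include "netdev-user.h"        /* generated netdev YNL bindings (assumed path) */

int main(void)
{
        struct netdev_page_pool_get_list *pools;
        struct ynl_error yerr;
        struct ynl_sock *ys;
        unsigned int n = 0;

        ys = ynl_sock_create(&ynl_netdev_family, &yerr);
        if (!ys) {
                fprintf(stderr, "YNL: %s\n", yerr.msg);
                return 1;
        }

        pools = netdev_page_pool_get_dump(ys);
        if (!pools) {
                ynl_sock_destroy(ys);
                return 2;
        }

        ynl_dump_foreach(pools, pp) {
                (void)pp;       /* this sketch only counts the entries */
                n++;
        }
        netdev_page_pool_get_list_free(pools);

        printf("page pools: %u\n", n);
        ynl_sock_destroy(ys);
        return 0;
}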
|
/linux-6.8/arch/sparc/kernel/ |
D | iommu-common.c |
     82  spin_lock_init(&(iommu->pools[i].lock));  in iommu_tbl_pool_init()
     83  iommu->pools[i].start = start;  in iommu_tbl_pool_init()
     84  iommu->pools[i].hint = start;  in iommu_tbl_pool_init()
     86  iommu->pools[i].end = start - 1;  in iommu_tbl_pool_init()
    131  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    161  pool = &(iommu->pools[0]);  in iommu_tbl_range_alloc()
    193  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    237  p = &tbl->pools[pool_nr];  in get_pool()
|
/linux-6.8/Documentation/core-api/ |
D | workqueue.rst |
     60  * Use per-CPU unified worker pools shared by all wq to provide
     83  called worker-pools.
     87  which manages worker-pools and processes the queued work items.
     89  There are two worker-pools, one for normal work items and the other
     91  worker-pools to serve work items queued on unbound workqueues - the
     92  number of these backing pools is dynamic.
    132  For unbound workqueues, the number of backing pools is dynamic.
    135  backing worker pools matching the attributes. The responsibility of
    169  worker-pools which host workers which are not bound to any
    172  worker-pools try to start execution of work items as soon as
    [all …]
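workqueue.rst describes per-CPU worker pools backing bound workqueues and a dynamically managed set of pools backing unbound ones; which kind serves a workqueue is decided when the workqueue is allocated. A minimal hedged kernel-style sketch using the standard workqueue API (the handler and the names are illustrative):

#include <linux/workqueue.h>
#include <linux/errno.h>

static void example_work_fn(struct work_struct *work)
{
        /* Runs in process context on a worker owned by one of the
         * worker-pools described above. */
}

static DECLARE_WORK(example_work, example_work_fn);
static struct workqueue_struct *example_wq;

static int example_setup(void)
{
        /* WQ_UNBOUND: served by the dynamically managed unbound pools;
         * drop the flag to use the per-CPU worker pools instead. */
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
        if (!example_wq)
                return -ENOMEM;

        queue_work(example_wq, &example_work);
        return 0;
}

static void example_teardown(void)
{
        /* destroy_workqueue() drains pending work before freeing the wq */
        destroy_workqueue(example_wq);
}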
|
/linux-6.8/drivers/soc/ti/ |
D | knav_qmss.h |
    191  * @pools: list of descriptor pools in the region
    203  struct list_head pools;  member
    207  * struct knav_pool: qmss pools
    304  struct list_head pools;  member
    363  list_for_each_entry(pool, &kdev->pools, list)
|
/linux-6.8/include/linux/ |
D | swiotlb.h |
    109  * @nslabs: Total number of IO TLB slabs in all pools.
    113  * @can_grow: %true if more pools can be allocated dynamically.
    116  * @pools: List of IO TLB memory pool descriptors (if dynamic).
    134  struct list_head pools;  member
    182  * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().  in is_swiotlb_buffer()
|
/linux-6.8/drivers/net/ethernet/freescale/fman/ |
D | fman.h |
     74  #define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
     75  #define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
    166  * buffer pools used by a port or storage-profile.
    174  * buffer pools allocated in the BM and used by a port or a
    178  u8 num_of_pools_used; /* Number of pools use by this port */
    186  * number of pools (all together!) are depleted
    189  /* the number of depleted pools that will invoke pause
|
/linux-6.8/mm/ |
D | dmapool.c |
     60  struct list_head pools;  member
     80  list_for_each_entry(pool, &dev->dma_pools, pools) {  in pools_show()
     92  static DEVICE_ATTR_RO(pools);
    210  * Given one of these pools, dma_pool_alloc()
    267  INIT_LIST_HEAD(&retval->pools);  in dma_pool_create()
    280  list_add(&retval->pools, &dev->dma_pools);  in dma_pool_create()
    288  list_del(&retval->pools);  in dma_pool_create()
    370  list_del(&pool->pools);  in dma_pool_destroy()
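dmapool.c keeps each device's DMA pools on a per-device list and exposes them through a read-only "pools" device attribute; drivers reach them through dma_pool_create()/dma_pool_alloc()/dma_pool_free()/dma_pool_destroy(). A minimal hedged driver-side sketch (the device pointer, block size and alignment are placeholders):

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* One pool of small, fixed-size DMA-coherent blocks for some device.
 * 'dev' is assumed to be the driver's already-probed struct device. */
static int example_use_dma_pool(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        /* 64-byte blocks, 8-byte aligned, no boundary-crossing constraint */
        pool = dma_pool_create("example-pool", dev, 64, 8, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (!vaddr) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... hand 'dma' to the hardware, touch 'vaddr' from the CPU ... */

        dma_pool_free(pool, vaddr, dma);
        dma_pool_destroy(pool);
        return 0;
}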
|
/linux-6.8/Documentation/netlink/specs/ |
D | netdev.yaml |
    128  which got destroyed already (page pools may outlast their netdevs
    161  Page Pools wait for all the memory allocated from them to be freed
    162  before truly disappearing. "Detached" Page Pools cannot be
    305  Get / dump information about Page Pools.
    306  (Only Page Pools associated with a net_device can be listed.)
|
/linux-6.8/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt |
      6  processors(PDSP), linking RAM, descriptor pools and infrastructure
     49  - queue-pools : child node classifying the queue ranges into pools.
     50  Queue ranges are grouped into 3 type of pools:
    151  queue-pools {
|
/linux-6.8/drivers/net/ethernet/chelsio/libcxgb/ |
D | libcxgb_ppm.c |
    348  struct cxgbi_ppm_pool *pools;  in ppm_alloc_cpu_pool()  local
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;  in ppm_alloc_cpu_pool()
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;  in ppm_alloc_cpu_pool()
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));  in ppm_alloc_cpu_pool()
    370  if (!pools)  in ppm_alloc_cpu_pool()
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);  in ppm_alloc_cpu_pool()
    384  return pools;  in ppm_alloc_cpu_pool()
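ppm_alloc_cpu_pool() above sizes a per-CPU pool structure plus a trailing bitmap, allocates it with __alloc_percpu() and then initializes each CPU's copy. A hedged kernel-style sketch of that shape with stand-in fields (the real cxgbi_ppm_pool layout differs):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

/* Illustrative per-CPU pool with a trailing allocation bitmap. */
struct example_cpu_pool {
        unsigned int next;
        unsigned int nr_bits;
        unsigned long bmap[];           /* flexible array, sized at alloc time */
};

static struct example_cpu_pool __percpu *example_alloc_cpu_pool(unsigned int nr_bits)
{
        unsigned int words = BITS_TO_LONGS(nr_bits);
        size_t alloc_sz = sizeof(struct example_cpu_pool) +
                          words * sizeof(unsigned long);
        struct example_cpu_pool __percpu *pools;
        unsigned int cpu;

        pools = __alloc_percpu(alloc_sz, __alignof__(struct example_cpu_pool));
        if (!pools)
                return NULL;

        /* per-CPU memory is zeroed; set up the non-zero fields per CPU */
        for_each_possible_cpu(cpu) {
                struct example_cpu_pool *ppool = per_cpu_ptr(pools, cpu);

                ppool->next = 0;
                ppool->nr_bits = nr_bits;
        }

        return pools;
}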
|
/linux-6.8/include/soc/fsl/ |
D | bman.h |
     47  * hardware pools as a dma_addr_t
     77  /* Portal and Buffer Pools */
     81  #define BM_POOL_MAX 64 /* max # of buffer pools */
|
/linux-6.8/drivers/net/ethernet/freescale/dpaa2/ |
D | dpaa2-xsk.c |
    162  pools_params->pools[curr_bp].priority_mask |= (1 << j);  in dpaa2_xsk_set_bp_per_qdbin()
    163  if (!pools_params->pools[curr_bp].priority_mask)  in dpaa2_xsk_set_bp_per_qdbin()
    166  pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;  in dpaa2_xsk_set_bp_per_qdbin()
    167  pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;  in dpaa2_xsk_set_bp_per_qdbin()
    168  pools_params->pools[curr_bp++].backup_pool = 0;  in dpaa2_xsk_set_bp_per_qdbin()
|
D | dpsw.h |
    196  * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
    198  * @pools: Array of buffer pools parameters; The number of valid entries
    200  * @pools.dpbp_id: DPBP object ID
    201  * @pools.buffer_size: Buffer size
    202  * @pools.backup_pool: Backup pool
    210  } pools[DPSW_MAX_DPBP];  member
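Together with the dpaa2-xsk.c hits above, the dpsw.h kerneldoc describes a pools configuration as an array of {dpbp_id, buffer_size, backup_pool} entries plus a count of valid entries. A self-contained hedged sketch with stand-in types; any field not named in the documentation above (notably the count field) is an assumption, and the real driver definitions in dpsw.h may differ:

/* Stand-in types mirroring the documented fields above. */
#define EXAMPLE_MAX_DPBP 8

struct example_pools_cfg {
        unsigned char num_dpbp;                 /* count of valid entries (assumed name) */
        struct {
                int dpbp_id;                    /* DPBP object ID */
                unsigned short buffer_size;     /* buffer size */
                int backup_pool;                /* nonzero if this entry is a backup pool */
        } pools[EXAMPLE_MAX_DPBP];
};

static void example_fill_pools_cfg(struct example_pools_cfg *cfg,
                                   int bpid, unsigned short buf_size)
{
        cfg->num_dpbp = 1;
        cfg->pools[0].dpbp_id = bpid;
        cfg->pools[0].buffer_size = buf_size;
        cfg->pools[0].backup_pool = 0;          /* primary pool, as in dpaa2-xsk.c above */
}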
|
/linux-6.8/drivers/net/wireless/ath/ |
D | dfs_pri_detector.c |
     73  * DOC: Singleton Pulse and Sequence Pools
     75  * Instances of pri_sequence and pulse_elem are kept in singleton pools to
     79  * Memory is freed after all references to the pools are released.
    100  /* free singleton pools with no references left */  in pool_deregister_ref()
    357  /* free pulse queue and sequences list and give objects back to pools */
|
/linux-6.8/drivers/net/ethernet/netronome/nfp/ |
D | nfp_abi.h |
     60  * @ingress_pools_count: number of ingress pools
     61  * @egress_pools_count: number of egress pools
|
/linux-6.8/tools/workqueue/ |
D | wq_dump.py |
     21  Worker Pools
     24  Lists all worker pools indexed by their ID. For each pool:
    124  print('Worker Pools')
|
/linux-6.8/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ |
D | ethernet-driver.rst |
     26  - buffer pools
     40  object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
     69  DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124  The role of hardware buffer pools is storage of ingress frame data. Each network
|
/linux-6.8/arch/powerpc/kernel/ |
D | iommu.c |
    100  * The hash is important to spread CPUs across all the pools. For example,
    102  * with 4 pools all primary threads would map to the same pool.
    252  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
    280  pool = &(tbl->pools[0]);  in iommu_range_alloc()
    299  /* Now try scanning all the other pools */  in iommu_range_alloc()
    302  pool = &tbl->pools[pool_nr];  in iommu_range_alloc()
    429  p = &tbl->pools[pool_nr];  in get_pool()
    755  p = &tbl->pools[i];  in iommu_init_table()
   1122  spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);  in iommu_take_ownership()
   1132  spin_unlock(&tbl->pools[i].lock);  in iommu_take_ownership()
    [all …]
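The iommu.c comments above explain why the CPU number is hashed before picking a pool (so regularly strided CPU ids, e.g. primary SMT threads, do not all land in one pool) and show the fallback of scanning the remaining pools when the chosen one is exhausted. A generic hedged userspace-C sketch of that selection and fallback; the per-pool allocator is a placeholder:

#include <stdbool.h>
#include <stddef.h>

#define NR_POOLS 4

struct alloc_pool { unsigned long start, end, hint; };

struct alloc_table {
        struct alloc_pool pools[NR_POOLS];
};

/* Placeholder per-pool allocator; returns false when the pool is full. */
static bool pool_alloc(struct alloc_pool *pool, unsigned long npages,
                       unsigned long *entry)
{
        (void)pool; (void)npages; (void)entry;
        return false;
}

static long table_alloc(struct alloc_table *tbl, unsigned int cpu,
                        unsigned long npages)
{
        /* Mix the CPU id before reducing it, so strided ids do not all
         * collapse into one pool; a cheap stand-in for the kernel's hash. */
        unsigned int pool_nr = ((cpu * 0x61C88647u) >> 16) % NR_POOLS;
        unsigned long entry;
        unsigned int i;

        for (i = 0; i < NR_POOLS; i++) {
                struct alloc_pool *pool = &tbl->pools[(pool_nr + i) % NR_POOLS];

                if (pool_alloc(pool, npages, &entry))
                        return (long)entry;
        }
        return -1;      /* all pools exhausted */
}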
|
/linux-6.8/net/sunrpc/ |
D | svc.c |
      9  * Multiple threads pools and NUMAisation
     43  * Mode for mapping cpus to pools.
     54  * Structure for mapping cpus to pools and vice versa.
    139  * so split pools on NUMA node boundaries  in svc_pool_map_choose_mode()
    150  * want to divide the pools on cpu boundaries.  in svc_pool_map_choose_mode()
    184  * Returns number of pools or <0 on error.
    212  * Returns number of pools or <0 on error.
    240  * Add a reference to the global map of cpus to pools (and
    241  * vice versa) if pools are in use.
    243  * Returns the number of pools. If this is '1', no reference
    [all …]
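svc.c above documents the modes for mapping CPUs to thread pools, splitting pools either on CPU or on NUMA node boundaries. A hedged kernel-style sketch of just that mapping decision; the enum and helper names are illustrative, not the sunrpc symbols:

#include <linux/topology.h>     /* cpu_to_node() */

/* Two of the pool-mapping modes described above: one pool per CPU,
 * or one pool per NUMA node, with a single global pool as fallback. */
enum example_pool_mode {
        POOL_MODE_PERCPU,       /* pool index == cpu id */
        POOL_MODE_PERNODE,      /* pool index == NUMA node of the cpu */
};

static unsigned int example_pool_for_cpu(enum example_pool_mode mode,
                                         unsigned int cpu)
{
        switch (mode) {
        case POOL_MODE_PERCPU:
                return cpu;
        case POOL_MODE_PERNODE:
                return cpu_to_node(cpu);
        }
        return 0;               /* single global pool */
}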
|