
Searched refs:flows (Results 1 – 25 of 81) sorted by relevance


/linux/include/net/
fq_impl.h
36 idx = flow - fq->flows; in __fq_adjust_removal()
152 flow = &fq->flows[idx]; in fq_flow_classify()
160 tin->flows++; in fq_flow_classify()
173 struct fq_flow *cur = &fq->flows[i]; in fq_find_fattest_flow()
361 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); in fq_init()
362 if (!fq->flows) in fq_init()
367 kvfree(fq->flows); in fq_init()
368 fq->flows = NULL; in fq_init()
373 fq_flow_init(&fq->flows[ in fq_init()
[all...]
fq.h
43 * pull interleaved packets out of the associated flows.
57 u32 flows; member
65 * @limit: max number of packets that can be queued across all flows
66 * @backlog: number of packets queued across all flows
69 struct fq_flow *flows; member
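The fq_impl.h and fq.h hits above share one shape: a single array of per-flow state sized flows_cnt, a packet hash selecting a slot, and the flow's index recovered by pointer arithmetic (idx = flow - fq->flows). A minimal userspace sketch of that shape, with illustrative names and calloc() standing in for kvcalloc():

#include <stdint.h>
#include <stdlib.h>

struct flow {
	unsigned int backlog;		/* packets queued on this flow */
};

struct fq {
	struct flow *flows;
	uint32_t flows_cnt;
};

static int fq_init(struct fq *fq, uint32_t cnt)
{
	fq->flows_cnt = cnt;
	fq->flows = calloc(cnt, sizeof(fq->flows[0]));	/* kvcalloc() in-kernel */
	return fq->flows ? 0 : -1;
}

static struct flow *fq_classify(struct fq *fq, uint32_t skb_hash)
{
	return &fq->flows[skb_hash % fq->flows_cnt];	/* hash picks the slot */
}

static uint32_t fq_flow_index(struct fq *fq, struct flow *flow)
{
	return flow - fq->flows;	/* same trick as __fq_adjust_removal() */
}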
rps.h
44 struct rps_dev_flow flows[]; member
50 * The rps_sock_flow_table contains mappings of flows to the last CPU
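The rps.h hit ends in a C99 flexible array member: the table header and its entries come from one allocation. A hedged userspace approximation of that layout (field names are illustrative):

#include <stdlib.h>

struct dev_flow {
	unsigned int cpu;		/* last CPU that handled the flow */
};

struct dev_flow_table {
	unsigned int mask;		/* entries - 1; size is a power of two */
	struct dev_flow flows[];	/* flexible array member, as in rps.h */
};

static struct dev_flow_table *flow_table_alloc(unsigned int entries)
{
	struct dev_flow_table *t =
		calloc(1, sizeof(*t) + entries * sizeof(t->flows[0]));

	if (t)
		t->mask = entries - 1;
	return t;
}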
/linux/drivers/crypto/allwinner/sun8i-ss/
sun8i-ss-core.c
76 ss->flows[flow].stat_req++; in sun8i_ss_run_task()
132 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_task()
133 ss->flows[flow].status = 0; in sun8i_ss_run_task()
138 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_run_task()
140 if (ss->flows[flow].status == 0) { in sun8i_ss_run_task()
159 ss->flows[flow].status = 1; in ss_irq_handler()
160 complete(&ss->flows[flow].complete); in ss_irq_handler()
478 ss->flows[i].stat_req); in sun8i_ss_debugfs_show()
536 crypto_engine_exit(ss->flows[i].engine); in sun8i_ss_free_flows()
548 ss->flows in allocate_flows()
[all...]
sun8i-ss-prng.c
134 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_prng_generate()
135 ss->flows[flow].status = 0; in sun8i_ss_prng_generate()
141 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_prng_generate()
143 if (ss->flows[flow].status == 0) { in sun8i_ss_prng_generate()
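Both sun8i-ss files follow the same submit/wait handshake: the submitter clears the per-flow status, reinitializes the completion, starts the hardware, and waits with a timeout, while the IRQ handler sets status and completes. A userspace sketch of that handshake, with pthreads standing in for the kernel completion API and all names hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct ss_flow {
	pthread_mutex_t lock;
	pthread_cond_t  complete;
	int             status;	/* 0 = pending, 1 = done (set by "IRQ") */
};

static int run_task(struct ss_flow *f)
{
	struct timespec deadline;
	bool timedout;

	pthread_mutex_lock(&f->lock);
	f->status = 0;			/* reinit_completion() analogue */
	/* ... kick the hardware here ... */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;		/* wait_for_completion_*_timeout() */
	while (f->status == 0 &&
	       pthread_cond_timedwait(&f->complete, &f->lock, &deadline) == 0)
		;
	timedout = (f->status == 0);	/* the "status == 0" check above */
	pthread_mutex_unlock(&f->lock);
	return timedout ? -1 : 0;
}

static void irq_handler(struct ss_flow *f)
{
	pthread_mutex_lock(&f->lock);
	f->status = 1;			/* ss->flows[flow].status = 1 */
	pthread_cond_signal(&f->complete);	/* complete(&...->complete) */
	pthread_mutex_unlock(&f->lock);
}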
/linux/samples/bpf/
do_hbm_test.sh
18 echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id >]"
34 echo " -f or --flows number of concurrent flows (default=1)"
38 echo " -l do not limit flows using loopback"
78 flows=1
150 -f=*|--flows=*)
151 flows="${i#*=}"
278 while [ $flow_cnt -le $flows ] ; d
[all...]
/linux/net/sched/
sch_fq_pie.c
22 * - Packets are classified into flows.
23 * - This is a Stochastic model (as we use a hash, several flows might
27 * so that new flows have priority over old ones.
58 struct fq_pie_flow *flows; member
152 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
308 if (q->flows) { in fq_pie_change()
310 "Number of flows cannot be changed"); in fq_pie_change()
316 "Number of flows must range in [1..65536]"); in fq_pie_change()
397 /* Limit this expensive loop to 2048 flows per round. */ in fq_pie_timer()
401 &q->flows[ in fq_pie_timer()
[all...]
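The comment at sch_fq_pie.c:397 caps per-tick work: rather than scanning every flow in one timer run, the loop handles a bounded batch and resumes from a saved position next round. A sketch of that batching; names and the batch size are illustrative, not the qdisc's actual code:

#include <stdint.h>

#define BATCH 2048	/* flows processed per timer round */

struct pie_sched {
	uint32_t flows_cnt;
	uint32_t scan_pos;	/* where the next round resumes */
};

static void timer_round(struct pie_sched *q)
{
	uint32_t budget = BATCH < q->flows_cnt ? BATCH : q->flows_cnt;

	if (!q->flows_cnt)
		return;
	while (budget--) {
		/* ... recompute drop probability for flow q->scan_pos ... */
		q->scan_pos = (q->scan_pos + 1) % q->flows_cnt;
	}
	/* re-arm the timer here; the walk continues next round */
}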
sch_cake.c
29 * flows from each other. This prevents a burst on one flow from increasing
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
151 struct cake_flow flows[CAKE_QUEUES]; member
268 * obtain the best features of each. Codel is excellent on flows which
270 * unresponsive flows.
787 q->flows[reduced_hash].set)) { in cake_hash()
805 if (!q->flows[outer_hash + k].set) { in cake_hash()
820 if (!q->flows[outer_hash + k].set) { in cake_hash()
835 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
836 cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_has in cake_hash()
[all...]
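cake_hash() above probes flows[outer_hash + k] over a small range of k, i.e. set-associative hashing: a reduced hash picks a set, and a short scan finds a matching or free way. A hedged sketch of that probe; the way count, tag field, and helper names are assumptions, not sch_cake's actual layout:

#include <stdbool.h>
#include <stdint.h>

#define WAYS 8	/* slots probed per set; nslots must be a multiple of WAYS */

struct cflow {
	bool set;
	uint32_t tag;
};

static struct cflow *set_assoc_find(struct cflow *flows, uint32_t nslots,
				    uint32_t hash, uint32_t tag)
{
	uint32_t outer = (hash % nslots) & ~(uint32_t)(WAYS - 1);

	for (uint32_t k = 0; k < WAYS; k++) {	/* existing entry? */
		struct cflow *f = &flows[outer + k];

		if (f->set && f->tag == tag)
			return f;
	}
	for (uint32_t k = 0; k < WAYS; k++) {	/* else take the first free way */
		struct cflow *f = &flows[outer + k];

		if (!f->set) {
			f->set = true;
			f->tag = tag;
			return f;
		}
	}
	return NULL;	/* set full: caller must evict or fall back */
}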
/linux/drivers/dma/ti/
k3-udma-glue.c
84 struct k3_udma_glue_rx_flow *flows; member
87 bool single_fdq; /* one FDQ for all flows */
691 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); in k3_udma_glue_cfg_rx_chn()
716 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
736 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
1036 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
1037 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
1038 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
1048 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
1088 rx_chn->flows in k3_udma_glue_request_remote_rx_chn_common()
[all...]
/linux/drivers/media/platform/amphion/
vpu_dbg.c
220 for (i = 0; i < ARRAY_SIZE(inst->flows); i++) { in vpu_dbg_instance()
221 u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows)); in vpu_dbg_instance()
223 if (!inst->flows[idx]) in vpu_dbg_instance()
226 inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C", in vpu_dbg_instance()
227 vpu_id_name(inst->flows[idx])); in vpu_dbg_instance()
519 inst->flows[inst->flow_idx] = flow; in vpu_inst_record_flow()
520 inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows)); in vpu_inst_record_flow()
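vpu_inst_record_flow() and vpu_dbg_instance() above implement a small ring buffer: writes advance flow_idx modulo ARRAY_SIZE(inst->flows), and the dump walks entries oldest-first starting at flow_idx. A compact sketch of the same idea (the size and the zero-means-unused convention are illustrative):

#include <stdio.h>

#define FLOW_RECORDS 16		/* illustrative ring size */

struct inst {
	unsigned int flows[FLOW_RECORDS];
	unsigned int flow_idx;
};

static void record_flow(struct inst *inst, unsigned int flow)
{
	inst->flows[inst->flow_idx] = flow;
	inst->flow_idx = (inst->flow_idx + 1) % FLOW_RECORDS;
}

static void dump_flows(const struct inst *inst)
{
	for (unsigned int i = 0; i < FLOW_RECORDS; i++) {
		unsigned int idx = (inst->flow_idx + i) % FLOW_RECORDS;

		if (inst->flows[idx])	/* 0 means unused, as in the driver */
			printf("flow %u\n", inst->flows[idx]);
	}
}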
/linux/drivers/infiniband/hw/hfi1/
tid_rdma.c
47 /* Reserved generation value to set to unused flows for kernel contexts */
757 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
775 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
776 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
804 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
853 rcd->flows[i].generation = mask_generation(get_random_u32()); in hfi1_kern_init_ctxt_generations()
1436 * (6) Reserves and programs HW flows.
1443 * invocation of function call. With flow = &req->flows[req->flow_idx],
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup()
1556 struct tid_rdma_flow *flow = &req->flows[re in hfi1_kern_exp_rcv_clear()
1634 struct tid_rdma_flow *flows; hfi1_kern_exp_rcv_alloc_flows() local
[all...]
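The tid_rdma.c hits revolve around per-flow generation numbers: a flow's generation advances when the flow is cleared, so packets stamped with a stale generation can be recognized, and a reserved value marks unused flows. A speculative sketch of such a counter; the mask width and reserved value are assumptions, not hfi1's actual constants:

#include <stdint.h>

#define GEN_MASK	0xff		/* assumed generation space */
#define GEN_RESERVED	GEN_MASK	/* assumed marker for unused flows */

static uint32_t generation_next(uint32_t gen)
{
	gen = (gen + 1) & GEN_MASK;
	if (gen == GEN_RESERVED)	/* never hand out the reserved value */
		gen = (gen + 1) & GEN_MASK;
	return gen;
}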
/linux/Documentation/networking/
nf_flowtable.rst
33 specifies what flows are placed into the flowtable. Hence, packets follow the
34 classic IP forwarding path unless the user explicitly instructs flows to use this
111 You can identify offloaded flows through the [OFFLOAD] tag when listing your
130 instead the real device is sufficient for the flowtable to track your flows.
198 There is a workqueue that adds the flows to the hardware. Note that a few
202 You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
scaling.rst
31 of logical flows. Packets for each flow are steered to a separate receive
50 applications that monitor TCP/IP flows (IDS, firewalls, ...etc) and need
252 to the same CPU is CPU load imbalance if flows vary in packet rate.
258 Flow Limit is an optional RPS feature that prioritizes small flows
259 during CPU contention by dropping packets from large flows slightly
260 ahead of those from small flows. It is active only when an RPS or RFS
266 new packet is dropped. Packets from other flows are still only
270 even large flows maintain connectivity.
288 identification of large flows and fewer false positives. The default
325 flows t
[all...]
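The scaling.rst excerpt describes Flow Limit: under backlog contention, packets from large flows are dropped slightly ahead of those from small flows, so even large flows keep connectivity while small flows are protected. A simplified sketch of that policy, not the kernel's implementation; the bucket count and threshold are invented, and count decay is omitted:

#include <stdbool.h>
#include <stdint.h>

#define BUCKETS   4096
#define THRESHOLD 128	/* assumed "large flow" cutoff */

struct flow_limit {
	uint16_t count[BUCKETS];	/* recent packets per hash bucket */
};

static bool should_drop(struct flow_limit *fl, uint32_t hash,
			bool backlog_contended)
{
	uint16_t *c = &fl->count[hash % BUCKETS];

	if (*c < UINT16_MAX)
		(*c)++;
	/* Only drop under contention, and only from large flows. */
	return backlog_contended && *c > THRESHOLD;
}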
iou-zcrx.rst
34 typically distribute flows across all HW Rx queues. Flow steering is required
35 to ensure that only desired flows are directed towards HW queues that are
42 copy flows away from queues that are configured for io_uring ZC Rx.
openvswitch.rst
16 table" that userspace populates with "flows" that map from keys based
104 A wildcarded flow can represent a group of exact match flows. Each '1' bit
108 by reducing the number of new flows that need to be processed by the user space program.
120 two possible approaches: reactively install flows as they miss the kernel
130 The behavior when using overlapping wildcarded flows is undefined. It is the
133 performs best-effort detection of overlapping wildcarded flows and may reject
146 future operations. The kernel is not required to index flows by the original
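The openvswitch.rst lines describe mask-based flows: each '1' bit in the mask must match exactly and each '0' bit is wildcarded, so one wildcarded flow stands in for a whole group of exact-match flows. A one-word sketch of that match (real OVS flow keys span many fields):

#include <stdbool.h>
#include <stdint.h>

struct wc_flow {
	uint64_t key;	/* expected bits, relevant only where mask is 1 */
	uint64_t mask;	/* 1 = exact match required, 0 = wildcard */
};

static bool flow_matches(const struct wc_flow *f, uint64_t pkt_key)
{
	return (pkt_key & f->mask) == (f->key & f->mask);
}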
pktgen.rst
97 flows: 0 flowlen: 0
112 flows: 0
286 pgset "flows 1"
391 flows
/linux/Documentation/admin-guide/pm/
system-wide.rst
11 suspend-flows
/linux/Documentation/userspace-api/media/mediactl/
media-controller-model.rst
26 by an entity flows from the entity's output to one or more entity
31 pads, either on the same entity or on different entities. Data flows
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rep.h
162 * neigh hash entry flows. Use it to periodically update the neigh
183 struct list_head flows; member
208 struct list_head flows; member
eswitch_offloads.c
56 /* There are two match-all miss flows, one for unicast dst mac and
355 /* Indirect table is supported only for flows with in_port uplink in esw_is_indir_table()
1188 struct mlx5_flow_handle **flows; in esw_add_fdb_peer_miss_rules() local
1206 flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL); in esw_add_fdb_peer_miss_rules()
1207 if (!flows) { in esw_add_fdb_peer_miss_rules()
1227 flows[peer_vport->index] = flow; in esw_add_fdb_peer_miss_rules()
1239 flows[peer_vport->index] = flow; in esw_add_fdb_peer_miss_rules()
1254 flows[peer_vport->index] = flow; in esw_add_fdb_peer_miss_rules()
1269 flows[peer_vpor in esw_add_fdb_peer_miss_rules()
1321 struct mlx5_flow_handle **flows; esw_del_fdb_peer_miss_rules() local
[all...]
/linux/net/core/
pktgen.c
194 pf(FLOW_SEQ) /* Sequential flows */ \
195 pf(IPSEC) /* ipsec on for flows */ \
417 struct flow_state *flows; member
418 unsigned int cflows; /* Concurrent flows (config) */
420 unsigned int nflows; /* accumulated flows (stats) */
591 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, in pktgen_if_show()
738 seq_printf(seq, " flows: %u\n", pkt_dev->nflows); in pktgen_if_show()
1624 if (!strcmp(name, "flows")) { in pktgen_if_write()
1634 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); in pktgen_if_write()
2371 return !!(pkt_dev->flows[flo in f_seen()
[all...]
/linux/Documentation/admin-guide/blockdev/drbd/
figures.rst
5 Data flows that relate some functions, and write packets
/linux/net/mctp/test/
route-test.c
1012 struct mctp_flow *flows[2]; in mctp_test_fragment_flow() local
1036 flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP); in mctp_test_fragment_flow()
1037 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]); in mctp_test_fragment_flow()
1038 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key); in mctp_test_fragment_flow()
1039 KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk); in mctp_test_fragment_flow()
1041 flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP); in mctp_test_fragment_flow()
1042 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]); in mctp_test_fragment_flow()
1043 KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key); in mctp_test_fragment_flow()
/linux/drivers/net/ethernet/ti/
am65-cpsw-nuss.c
515 flow = &rx_chn->flows[id]; in am65_cpsw_destroy_rxq()
575 flow = &rx_chn->flows[id]; in am65_cpsw_create_rxq()
791 am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false); in am65_cpsw_nuss_rx_cleanup()
2357 struct am65_cpsw_rx_flow *flows; in am65_cpsw_nuss_remove_rx_chns() local
2361 flows = rx_chn->flows; in am65_cpsw_nuss_remove_rx_chns()
2364 if (!(flows[i].irq < 0)) in am65_cpsw_nuss_remove_rx_chns()
2365 devm_free_irq(dev, flows[i].irq, &flows[i]); in am65_cpsw_nuss_remove_rx_chns()
2366 netif_napi_del(&flows[ in am65_cpsw_nuss_remove_rx_chns()
[all...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_tun_encap.c
193 /* Do not offload flows with unresolved neighbors */ in mlx5e_tc_encap_flows_add()
292 /* Takes reference to all flows attached to encap and adds the flows to
300 list_for_each_entry(efi, &e->flows, list) { in mlx5e_take_all_encap_flows()
306 /* Takes reference to all flows attached to route and adds the flows to
421 list_for_each_entry_safe(efi, tmp, &e->flows, list) { in mlx5e_tc_update_neigh_used_value()
466 WARN_ON(!list_empty(&e->flows)); in mlx5e_encap_dealloc()
483 WARN_ON(!list_empty(&d->flows)); in mlx5e_decap_dealloc()
889 INIT_LIST_HEAD(&e->flows); in mlx5e_attach_encap()
[all...]
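en_rep.h and tc_tun_encap.c above hang flows off an encap entry through a struct list_head and walk them with list_for_each_entry(_safe). A userspace sketch of that intrusive-list pattern, with a minimal stand-in for <linux/list.h> and a loop that mirrors, conceptually, what mlx5e_take_all_encap_flows() does:

#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encap_entry {
	struct list_head flows;	/* as in en_rep.h: struct list_head flows */
};

struct flow_item {
	struct list_head list;	/* links the flow into the encap entry */
	int refcnt;
};

/* Take a reference on every flow attached to the encap entry. */
static void take_all_flows(struct encap_entry *e)
{
	for (struct list_head *p = e->flows.next; p != &e->flows; p = p->next) {
		struct flow_item *fi = container_of(p, struct flow_item, list);

		fi->refcnt++;
	}
}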
