
Searched refs:cbs (Results 1 – 25 of 63) sorted by relevance


/linux/kernel/bpf/
disasm.c 16 static const char *__func_get_name(const struct bpf_insn_cbs *cbs, in __func_get_name() argument
27 if (cbs && cbs->cb_call) { in __func_get_name()
30 res = cbs->cb_call(cbs->private_data, insn); in __func_get_name()
43 static const char *__func_imm_name(const struct bpf_insn_cbs *cbs, in __func_imm_name() argument
47 if (cbs && cbs->cb_imm) in __func_imm_name()
48 return cbs->cb_imm(cbs in __func_imm_name()
186 print_bpf_insn(const struct bpf_insn_cbs * cbs,const struct bpf_insn * insn,bool allow_ptr_leaks) print_bpf_insn() argument
[all...]
disasm.h 37 void print_bpf_insn(const struct bpf_insn_cbs *cbs,
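The matches above define the disassembler's hook table: struct bpf_insn_cbs carries optional cb_call and cb_imm resolvers plus a private_data cookie, and the guards in __func_get_name()/__func_imm_name() fall back to generic formatting when a hook is NULL. A minimal caller might look like the sketch below; the cb_print member and the callback signatures are not visible in these excerpts and are assumed here, so treat it as an outline rather than the exact API.

    /* Sketch: assumed output callback (cb_print is not shown above). */
    static void my_print(void *private_data, const char *fmt, ...)
    {
        va_list args;

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
    }

    static void dump_one(const struct bpf_insn *insn)
    {
        const struct bpf_insn_cbs cbs = {
            .cb_print     = my_print, /* assumed member: sink for formatted text */
            .cb_call      = NULL,     /* optional: map call insns to names */
            .cb_imm       = NULL,     /* optional: pretty-print 64-bit immediates */
            .private_data = NULL,     /* cookie handed back to the callbacks */
        };

        /* Last argument is allow_ptr_leaks, per the print_bpf_insn() prototype above. */
        print_bpf_insn(&cbs, insn, true);
    }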
/linux/tools/testing/selftests/bpf/
disasm.c 16 static const char *__func_get_name(const struct bpf_insn_cbs *cbs, in __func_get_name() argument
27 if (cbs && cbs->cb_call) { in __func_get_name()
30 res = cbs->cb_call(cbs->private_data, insn); in __func_get_name()
43 static const char *__func_imm_name(const struct bpf_insn_cbs *cbs, in __func_imm_name() argument
47 if (cbs && cbs->cb_imm) in __func_imm_name()
48 return cbs->cb_imm(cbs in __func_imm_name()
186 print_bpf_insn(const struct bpf_insn_cbs * cbs,const struct bpf_insn * insn,bool allow_ptr_leaks) print_bpf_insn() argument
[all...]
disasm_helpers.c 44 struct bpf_insn_cbs cbs = { in disasm_insn() local
53 print_bpf_insn(&cbs, insn, true); in disasm_insn()
disasm.h 37 void print_bpf_insn(const struct bpf_insn_cbs *cbs,
/linux/drivers/net/ethernet/microchip/lan966x/
lan966x_cbs.c 9 u32 cir, cbs; in lan966x_cbs_add() local
20 cbs = (qopt->idleslope - qopt->sendslope) * in lan966x_cbs_add()
29 cbs = DIV_ROUND_UP(cbs, 4096); in lan966x_cbs_add()
31 cbs = cbs ?: 1; in lan966x_cbs_add()
35 cbs > GENMASK(6, 0)) in lan966x_cbs_add()
45 QSYS_CIR_CFG_CIR_BURST_SET(cbs), in lan966x_cbs_add()
lan966x_tbf.c 11 u32 cir, cbs; in lan966x_tbf_add() local
26 cbs = qopt->replace_params.max_size; in lan966x_tbf_add()
33 cbs = DIV_ROUND_UP(cbs, 4096); in lan966x_tbf_add()
35 cbs = cbs ?: 1; in lan966x_tbf_add()
39 cbs > GENMASK(6, 0)) in lan966x_tbf_add()
49 QSYS_CIR_CFG_CIR_BURST_SET(cbs), in lan966x_tbf_add()
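Both lan966x paths program the same 7-bit burst field: the byte count is converted to 4096-byte units with DIV_ROUND_UP(), forced to at least one unit, and rejected if it exceeds GENMASK(6, 0), i.e. 127. A stand-alone sketch of that quantisation (the helper name is invented; only the arithmetic mirrors the excerpts):

    #include <errno.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define BURST_FIELD_MAX     127U    /* GENMASK(6, 0): 7-bit hardware field */

    /* Hypothetical helper: bytes -> 4096-byte burst units for QSYS_CIR_CFG. */
    static int burst_to_hw_units(uint32_t bytes, uint32_t *units)
    {
        uint32_t cbs = DIV_ROUND_UP(bytes, 4096);

        if (!cbs)
            cbs = 1;                    /* no zero burst size */
        if (cbs > BURST_FIELD_MAX)
            return -EINVAL;             /* does not fit the register field */

        *units = cbs;                   /* e.g. 10000 bytes -> 3 units */
        return 0;
    }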
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_binding.h 217 extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
220 extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
222 extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
229 extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
230 extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
231 extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
234 extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
236 vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
237 extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
vmwgfx_context.c 38 struct vmw_ctx_binding_state *cbs; member
156 vmw_binding_state_kill(uctx->cbs); in vmw_hw_context_destroy()
207 uctx->cbs = vmw_binding_state_alloc(dev_priv); in vmw_gb_context_init()
208 if (IS_ERR(uctx->cbs)) { in vmw_gb_context_init()
209 ret = PTR_ERR(uctx->cbs); in vmw_gb_context_init()
393 vmw_binding_state_scrub(uctx->cbs); in vmw_gb_context_unbind()
559 vmw_binding_state_scrub(uctx->cbs); in vmw_dx_context_scrub_cotables()
692 if (ctx->cbs) in vmw_user_context_free()
693 vmw_binding_state_free(ctx->cbs); in vmw_user_context_free()
811 return vmw_binding_state_list(uctx->cbs); in vmw_context_binding_list()
[all...]
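Taken together, the declarations and the vmwgfx_context.c call sites outline the lifetime of a binding-state tracker: allocate it when the context is created, scrub or kill the bindings while the context is torn down, then free the tracker. A rough outline under those assumptions (the containing context structure is called my_ctx here because its real name is not shown in the excerpts, and the separate teardown paths are compressed into one function):

    /* Stand-in for the driver's context object; the excerpt only shows its
     * struct vmw_ctx_binding_state *cbs member. */
    struct my_ctx {
        struct vmw_ctx_binding_state *cbs;
    };

    static int my_ctx_init(struct vmw_private *dev_priv, struct my_ctx *ctx)
    {
        ctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(ctx->cbs))
            return PTR_ERR(ctx->cbs);
        return 0;
    }

    static void my_ctx_unbind(struct my_ctx *ctx)
    {
        vmw_binding_state_scrub(ctx->cbs);  /* scrub everything currently bound */
    }

    static void my_ctx_destroy(struct my_ctx *ctx)
    {
        vmw_binding_state_kill(ctx->cbs);   /* as in vmw_hw_context_destroy() */
        if (ctx->cbs)
            vmw_binding_state_free(ctx->cbs);
    }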
/linux/drivers/net/ethernet/mscc/
ocelot_police.c 27 u32 cir = 0, cbs = 0, pir = 0, pbs = 0; in qos_policer_conf_set() local
48 cbs = conf->cbs; in qos_policer_conf_set()
49 if (cir == 0 && cbs == 0) { in qos_policer_conf_set()
55 cbs = DIV_ROUND_UP(cbs, 4096); in qos_policer_conf_set()
56 cbs = (cbs ? cbs : 1); /* No zero burst size */ in qos_policer_conf_set()
125 if (cbs > cbs_ma in qos_policer_conf_set()
[all...]
ocelot_police.h 28 u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */ member
/linux/net/sched/
sch_cbs.c 253 struct tc_cbs_qopt_offload cbs = { }; in cbs_disable_offload() local
267 cbs.queue = q->queue; in cbs_disable_offload()
268 cbs.enable = 0; in cbs_disable_offload()
270 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); in cbs_disable_offload()
273 cbs.queue); in cbs_disable_offload()
281 struct tc_cbs_qopt_offload cbs = { }; in cbs_enable_offload() local
285 NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload"); in cbs_enable_offload()
289 cbs.queue = q->queue; in cbs_enable_offload()
291 cbs.enable = 1; in cbs_enable_offload()
292 cbs in cbs_enable_offload()
[all...]
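sch_cbs drives hardware offload through the generic ndo_setup_tc() hook: it fills a struct tc_cbs_qopt_offload with the queue index and an enable flag (plus the shaping parameters on the enable path, truncated above) and passes TC_SETUP_QDISC_CBS to the driver. A compressed sketch of the disable path, combining the capability check from cbs_enable_offload() with the call from cbs_disable_offload(); the function name is invented:

    static int cbs_offload_disable(struct net_device *dev, int queue,
                                   struct netlink_ext_ack *extack)
    {
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_cbs_qopt_offload cbs = { };
        int err;

        if (!ops->ndo_setup_tc) {
            NL_SET_ERR_MSG(extack,
                           "Specified device does not support cbs offload");
            return -EOPNOTSUPP;
        }

        cbs.queue = queue;
        cbs.enable = 0;                 /* set to 1 on the enable path */

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
        if (err < 0)
            pr_warn("Couldn't disable CBS offload for queue %d\n", cbs.queue);

        return err;
    }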
/linux/drivers/net/ethernet/qlogic/qed/
qed_ll2.c 58 const struct qed_ll2_cb_ops *cbs; member
106 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) in qed_ll2b_complete_tx_packet()
107 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, in qed_ll2b_complete_tx_packet()
225 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { in qed_ll2b_complete_rx_packet()
229 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, in qed_ll2b_complete_rx_packet()
337 p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, in qed_ll2_txq_flush()
401 p_ll2_conn->cbs in qed_ll2_txq_completion()
1327 qed_ll2_set_cbs(struct qed_ll2_info * p_ll2_info,const struct qed_ll2_cbs * cbs) qed_ll2_set_cbs() argument
[all...]
/linux/drivers/misc/sgi-gru/
gru_instructions.h 628 struct gru_control_block_status *cbs = (void *)cb; in gru_get_cb_status() local
630 return cbs->istatus; in gru_get_cb_status()
636 struct gru_control_block_status *cbs = (void *)cb; in gru_get_cb_message_queue_substatus() local
638 return cbs->isubstatus & CBSS_MSG_QUEUE_MASK; in gru_get_cb_message_queue_substatus()
644 struct gru_control_block_status *cbs = (void *)cb; in gru_get_cb_substatus() local
646 return cbs->isubstatus; in gru_get_cb_substatus()
657 struct gru_control_block_status *cbs = (void *)cb; in gru_check_status() local
660 ret = cbs->istatus; in gru_check_status()
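The GRU accessors all follow the same pattern: the opaque control-block handle is reinterpreted as a struct gru_control_block_status and the istatus/isubstatus fields are read from it, with the message-queue variant masking isubstatus by CBSS_MSG_QUEUE_MASK. Condensed below; field widths and the mask value are not visible in the excerpts:

    /* View the opaque cb handle as a status block and read its fields. */
    static int cb_status(void *cb)
    {
        struct gru_control_block_status *cbs = cb;

        return cbs->istatus;
    }

    static int cb_mq_substatus(void *cb)
    {
        struct gru_control_block_status *cbs = cb;

        return cbs->isubstatus & CBSS_MSG_QUEUE_MASK;
    }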
/linux/drivers/gpu/drm/nouveau/dispnv04/
arb.c 58 int found, mclk_extra, mclk_loop, cbs, m1, p1; in nv04_calc_arb() local
69 cbs = 128; in nv04_calc_arb()
92 m1 = clwm + cbs - 512; in nv04_calc_arb()
103 fifo->burst = cbs; in nv04_calc_arb()
/linux/drivers/net/dsa/sja1105/
sja1105_main.c 2158 if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) in sja1105_find_cbs_shaper()
2172 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) in sja1105_find_unused_cbs_shaper()
2184 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_delete_cbs_shaper() local
2186 if (cbs->port == port && cbs->prio == prio) { in sja1105_delete_cbs_shaper()
2187 memset(cbs, 0, sizeof(*cbs)); in sja1105_delete_cbs_shaper()
2200 struct sja1105_cbs_entry *cbs; sja1105_setup_tc_cbs() local
2257 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; sja1105_reload_cbs() local
[all...]
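The sja1105 driver keeps its credit-based shapers in a fixed table, priv->cbs[]: an active shaper is matched by (port, prio), a free slot is one whose idle_slope and send_slope are both zero, and deletion simply zeroes the entry. A stand-alone sketch of those lookups, assuming the table length is passed in as a count and listing only the entry fields the excerpts use:

    #include <stdint.h>
    #include <string.h>

    struct sja1105_cbs_entry {      /* assumed shape; the real entry has more fields */
        int port;
        int prio;
        uint64_t idle_slope;
        uint64_t send_slope;
    };

    /* Match an existing shaper by (port, prio). */
    static int find_cbs_shaper(const struct sja1105_cbs_entry *cbs, int count,
                               int port, int prio)
    {
        for (int i = 0; i < count; i++)
            if (cbs[i].port == port && cbs[i].prio == prio)
                return i;
        return -1;
    }

    /* A slot is free when both slopes are zero. */
    static int find_unused_cbs_shaper(const struct sja1105_cbs_entry *cbs, int count)
    {
        for (int i = 0; i < count; i++)
            if (!cbs[i].idle_slope && !cbs[i].send_slope)
                return i;
        return -1;
    }

    /* Deleting a shaper zeroes its entry, returning the slot to the free pool. */
    static void delete_cbs_shaper(struct sja1105_cbs_entry *cbs, int index)
    {
        memset(&cbs[index], 0, sizeof(cbs[index]));
    }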
/linux/tools/bpf/bpftool/
xlated_dumper.c 222 const struct bpf_insn_cbs cbs = { in dump_xlated_json() local
271 print_bpf_insn(&cbs, insn + i, true); in dump_xlated_json()
307 const struct bpf_insn_cbs cbs = { in dump_xlated_plain() local
353 print_bpf_insn(&cbs, insn + i, true); in dump_xlated_plain()
371 const struct bpf_insn_cbs cbs = { in dump_xlated_for_graph() local
419 print_bpf_insn(&cbs, cur, true); in dump_xlated_for_graph()
/linux/drivers/net/ethernet/intel/
e100.c 533 struct param_range cbs; member
555 struct cb *cbs; member
1038 struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; in e100_get_defaults() local
1046 nic->params.cbs = cbs; in e100_get_defaults()
1819 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), in e100_tx_clean()
1849 if (nic->cbs) { in e100_clean_cbs()
1850 while (nic->cbs_avail != nic->params.cbs.count) { in e100_clean_cbs()
1862 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); in e100_clean_cbs()
1863 nic->cbs in e100_clean_cbs()
2561 struct param_range *cbs = &nic->params.cbs; e100_get_ringparam() local
2576 struct param_range *cbs = &nic->params.cbs; e100_set_ringparam() local
[all...]
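In e100, params.cbs is a struct param_range describing the command-block (TX) ring: the defaults above are min 64, max 256, count 128, and the ethtool ringparam handlers read and write that range. A small illustrative clamp in the spirit of e100_set_ringparam(); the helper is hypothetical and the real driver also reallocates the ring:

    #include <stdint.h>

    struct param_range {        /* shape implied by the defaults above */
        uint32_t min;           /* 64 */
        uint32_t max;           /* 256 */
        uint32_t count;         /* currently configured size, default 128 */
    };

    static void set_cb_count(struct param_range *cbs, uint32_t requested)
    {
        if (requested < cbs->min)
            requested = cbs->min;
        else if (requested > cbs->max)
            requested = cbs->max;
        cbs->count = requested; /* e.g. a request of 512 is clamped to 256 */
    }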
/linux/kernel/rcu/
tree_stall.h 937 unsigned long cbs = 0; in show_rcu_gp_kthreads() local
991 cbs += data_race(READ_ONCE(rdp->n_cbs_invoked)); in show_rcu_gp_kthreads()
994 pr_info("RCU callbacks invoked since boot: %lu\n", cbs); in show_rcu_gp_kthreads()
1064 unsigned long cbs; in rcu_fwd_progress_check() local
1083 cbs = rcu_get_n_cbs_cpu(cpu); in rcu_fwd_progress_check()
1084 if (!cbs) in rcu_fwd_progress_check()
1088 pr_cont(" %d: %lu", cpu, cbs); in rcu_fwd_progress_check()
1089 if (cbs <= max_cbs) in rcu_fwd_progress_check()
1091 max_cbs = cbs; in rcu_fwd_progress_check()
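Both RCU diagnostics above walk per-CPU callback counters: show_rcu_gp_kthreads() sums n_cbs_invoked across CPUs with data_race(READ_ONCE(...)) to report callbacks invoked since boot, and rcu_fwd_progress_check() prints each CPU's queued-callback count and remembers the largest backlog. A sketch of the latter scan; the CPU iterator is an assumption, the named helpers match the excerpts:

    /* Print CPUs that still have queued callbacks and track the worst one. */
    static void report_cb_backlog(void)
    {
        unsigned long cbs, max_cbs = 0;
        int cpu, max_cpu = -1;

        for_each_possible_cpu(cpu) {
            cbs = rcu_get_n_cbs_cpu(cpu);
            if (!cbs)
                continue;
            pr_cont(" %d: %lu", cpu, cbs);
            if (cbs <= max_cbs)
                continue;
            max_cbs = cbs;
            max_cpu = cpu;
        }
        (void)max_cpu;          /* the real check reports this CPU separately */
    }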
/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c 266 struct qed_ll2_cbs cbs; in qedr_ll2_start() local
270 cbs.rx_comp_cb = qedr_ll2_complete_rx_packet; in qedr_ll2_start()
271 cbs.tx_comp_cb = qedr_ll2_complete_tx_packet; in qedr_ll2_start()
272 cbs.rx_release_cb = qedr_ll2_release_rx_packet; in qedr_ll2_start()
273 cbs.tx_release_cb = qedr_ll2_complete_tx_packet; in qedr_ll2_start()
274 cbs.cookie = dev; in qedr_ll2_start()
289 data.cbs = &cbs; in qedr_ll2_start()
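qedr plugs its own handlers into the LL2 layer by filling a struct qed_ll2_cbs before acquiring the connection; the qed_ll2.c matches earlier show the other side invoking each pointer only if it was registered and handing the cookie back as the first argument. Pulled together from the excerpt (the surrounding acquire call and the 'data' structure are abbreviated here):

    struct qed_ll2_cbs cbs;

    cbs.rx_comp_cb    = qedr_ll2_complete_rx_packet;
    cbs.tx_comp_cb    = qedr_ll2_complete_tx_packet;
    cbs.rx_release_cb = qedr_ll2_release_rx_packet;
    cbs.tx_release_cb = qedr_ll2_complete_tx_packet;  /* reused for release */
    cbs.cookie        = dev;    /* handed back on every callback */

    data.cbs = &cbs;            /* passed to the LL2 acquire call */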
/linux/drivers/video/fbdev/nvidia/
nv_hw.c 246 int found, mclk_extra, mclk_loop, cbs, m1, p1; in nv4CalcArbitration() local
263 cbs = 128; in nv4CalcArbitration()
304 cbs * 1000 * 1000 / 16 / nvclk_freq; in nv4CalcArbitration()
307 cbs * 1000 * 1000 / (8 * width) / in nv4CalcArbitration()
341 m1 = clwm + cbs - 512; in nv4CalcArbitration()
420 int found, mclk_extra, mclk_loop, cbs, m1; in nv10CalcArbitration() local
441 cbs = 512; in nv10CalcArbitration()
559 cbs = 512; in nv10CalcArbitration()
572 m1 = clwm + cbs - 1024; /* Amount of overfill */ in nv10CalcArbitration()
583 if (cbs < in nv10CalcArbitration()
[all...]
/linux/drivers/net/ethernet/freescale/enetc/
enetc_qos.c 237 struct tc_cbs_qopt_offload *cbs = type_data; in enetc_setup_tc_cbs() local
244 u8 tc = cbs->queue; in enetc_setup_tc_cbs()
252 /* Support highest prio and second prio tc in cbs mode */ in enetc_setup_tc_cbs()
256 if (!cbs->enable) { in enetc_setup_tc_cbs()
274 if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || in enetc_setup_tc_cbs()
275 cbs->idleslope < 0 || cbs->sendslope > 0) in enetc_setup_tc_cbs()
280 bw = cbs->idleslope / (port_transmit_rate * 10UL); in enetc_setup_tc_cbs()
456 u32 cbs; member
[all...]
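enetc validates that the two slopes describe the full link rate (idleslope minus sendslope must equal the port rate in matching units, with idleslope non-negative and sendslope non-positive) and then derives a bandwidth share as bw = idleslope / (port_transmit_rate * 10). Reading the slopes as kbit/s and the port rate as Mbit/s (an interpretation, not stated in the excerpts), a 30% reservation on a 1 Gbit/s link works out as follows; the numbers are purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        long port_transmit_rate = 1000;     /* Mbit/s */
        long idleslope = 300000;            /* kbit/s, must be >= 0 */
        long sendslope = -700000;           /* kbit/s, must be <= 0 */

        /* Same check as enetc_setup_tc_cbs():
         * 300000 - (-700000) == 1000 * 1000. */
        if (idleslope - sendslope != port_transmit_rate * 1000L ||
            idleslope < 0 || sendslope > 0)
            return 1;

        /* bw = idleslope / (port_transmit_rate * 10) -> 300000 / 10000 = 30 */
        printf("reserved bandwidth: %ld%%\n",
               idleslope / (port_transmit_rate * 10L));
        return 0;
    }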
/linux/drivers/net/ethernet/amd/xgbe/
xgbe-dcb.c 23 ets->cbs = pdata->ets->cbs; in xgbe_dcb_ieee_getets()
/linux/drivers/video/fbdev/riva/
riva_hw.c 660 int found, mclk_extra, mclk_loop, cbs, m1, p1; in nv4CalcArbitration() local
677 cbs = 128; in nv4CalcArbitration()
718 video_fill_us = cbs*1000*1000 / 16 / nvclk_freq ; in nv4CalcArbitration()
720 video_fill_us = cbs*1000*1000 / (8 * width) / mclk_freq; in nv4CalcArbitration()
751 m1 = clwm + cbs - 512; in nv4CalcArbitration()
842 int found, mclk_extra, mclk_loop, cbs, m1; in nv10CalcArbitration() local
863 cbs = 512; in nv10CalcArbitration()
967 // What happens if the latency to fetch the cbs is so large that in nv10CalcArbitration()
971 us_crt = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ; in nv10CalcArbitration()
986 cbs in nv10CalcArbitration()
[all...]
/linux/drivers/dma/ppc4xx/
xor.h 70 u32 cbs; /* status */ member
