
Searched refs:qs (Results 1 – 25 of 50) sorted by relevance


/linux/fs/ocfs2/cluster/
quorum.c
94 struct o2quo_state *qs = &o2quo_state; in o2quo_make_decision() local
96 spin_lock_bh(&qs->qs_lock); in o2quo_make_decision()
98 lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES); in o2quo_make_decision()
100 lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm); in o2quo_make_decision()
103 "lowest: %d (%sreachable)\n", qs->qs_heartbeating, in o2quo_make_decision()
104 qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un"); in o2quo_make_decision()
106 if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) || in o2quo_make_decision()
107 qs->qs_heartbeating == 1) in o2quo_make_decision()
110 if (qs->qs_heartbeating & 1) { in o2quo_make_decision()
113 quorum = (qs->qs_heartbeating + 1)/2; in o2quo_make_decision()
[all …]
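The quorum.c matches above show o2quo_make_decision() counting heartbeating nodes and, for an odd-sized cluster, requiring a simple majority: quorum = (heartbeating + 1)/2. Below is a standalone sketch of that majority arithmetic only; the fencing policy around it, the even-cluster tie break on the lowest node id, and what exactly counts as "connected" are simplified away, and none of this is the ocfs2 code itself.

    /* Sketch of the simple-majority test visible in o2quo_make_decision().
     * Illustrative only: the real ocfs2 code also handles even-sized
     * clusters with a lowest-node-id tie break. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool keeps_quorum_odd(unsigned int heartbeating, unsigned int connected)
    {
        /* majority of an odd-sized heartbeat group */
        unsigned int quorum = (heartbeating + 1) / 2;

        return connected >= quorum;
    }

    int main(void)
    {
        /* 5 nodes heartbeating, only 2 reachable: majority lost */
        printf("%d\n", keeps_quorum_odd(5, 2));   /* 0 */
        /* 5 nodes heartbeating, 3 reachable: majority held */
        printf("%d\n", keeps_quorum_odd(5, 3));   /* 1 */
        return 0;
    }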
/linux/tools/net/ynl/ynltool/
qstats.c
31 ynl_dump_foreach(qstats, qs) { in print_json_qstats()
37 name = if_indextoname(qs->ifindex, ifname); in print_json_qstats()
40 jsonw_uint_field(json_wtr, "ifindex", qs->ifindex); in print_json_qstats()
42 if (qs->_present.queue_type) in print_json_qstats()
44 netdev_queue_type_str(qs->queue_type)); in print_json_qstats()
45 if (qs->_present.queue_id) in print_json_qstats()
46 jsonw_uint_field(json_wtr, "queue-id", qs->queue_id); in print_json_qstats()
48 if (qs->_present.rx_packets || qs->_present.rx_bytes || in print_json_qstats()
49 qs->_present.rx_alloc_fail || qs->_present.rx_hw_drops || in print_json_qstats()
50 qs->_present.rx_csum_complete || qs->_present.rx_hw_gro_packets) { in print_json_qstats()
[all …]
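The qstats.c matches illustrate the idiom YNL-generated structs use for optional netlink attributes: each optional field has a companion _present bit that is checked before the value is read or printed. A small sketch of that presence-bit pattern follows; the struct and field names are invented for the sketch and are not the generated ynl types.

    /* Sketch of the optional-attribute idiom seen in print_json_qstats():
     * check a _present bit before trusting the corresponding value.
     * demo_qstats is illustrative, not the ynl-generated type. */
    #include <stdint.h>
    #include <stdio.h>

    struct demo_qstats {
        struct {
            uint32_t queue_id:1;
            uint32_t rx_packets:1;
        } _present;
        uint32_t queue_id;
        uint64_t rx_packets;
    };

    static void print_demo(const struct demo_qstats *qs)
    {
        if (qs->_present.queue_id)
            printf("queue-id: %u\n", qs->queue_id);
        if (qs->_present.rx_packets)
            printf("rx-packets: %llu\n", (unsigned long long)qs->rx_packets);
    }

    int main(void)
    {
        struct demo_qstats qs = { ._present = { .rx_packets = 1 }, .rx_packets = 42 };

        print_demo(&qs);    /* prints only rx-packets; queue-id is absent */
        return 0;
    }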
/linux/kernel/bpf/
queue_stack_maps.c
31 static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs) in queue_stack_map_is_empty() argument
33 return qs->head == qs->tail; in queue_stack_map_is_empty()
36 static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) in queue_stack_map_is_full() argument
38 u32 head = qs->head + 1; in queue_stack_map_is_full()
40 if (unlikely(head >= qs->size)) in queue_stack_map_is_full()
43 return head == qs->tail; in queue_stack_map_is_full()
68 struct bpf_queue_stack *qs; in queue_stack_map_alloc() local
72 queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
74 qs = bpf_map_area_alloc(queue_size, numa_node); in queue_stack_map_alloc()
75 if (!qs) in queue_stack_map_alloc()
[all …]
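queue_stack_maps.c shows the classic bounded-ring bookkeeping behind BPF queue/stack maps: the map is empty when head == tail, and full when advancing head (wrapping at size) would land on tail, which deliberately leaves one slot unused so the two states stay distinguishable. A standalone sketch of that index arithmetic, not the kernel code:

    /* Sketch of the head/tail checks seen in queue_stack_map_is_empty() /
     * queue_stack_map_is_full(); one slot stays unused so "full" and
     * "empty" can be told apart. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint32_t head;    /* next slot to write */
        uint32_t tail;    /* next slot to read */
        uint32_t size;    /* number of slots; capacity is size - 1 */
    };

    static bool ring_is_empty(const struct ring *r)
    {
        return r->head == r->tail;
    }

    static bool ring_is_full(const struct ring *r)
    {
        uint32_t head = r->head + 1;

        if (head >= r->size)
            head = 0;    /* wrap around */

        return head == r->tail;
    }

    int main(void)
    {
        struct ring r = { .head = 0, .tail = 0, .size = 4 };

        assert(ring_is_empty(&r));
        r.head = 3;        /* three entries queued in a 4-slot ring */
        assert(ring_is_full(&r));
        return 0;
    }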
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
726 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) in init_qset_cntxt() argument
728 qs->rspq.cntxt_id = id; in init_qset_cntxt()
729 qs->fl[0].cntxt_id = 2 * id; in init_qset_cntxt()
730 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; in init_qset_cntxt()
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; in init_qset_cntxt()
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; in init_qset_cntxt()
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; in init_qset_cntxt()
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; in init_qset_cntxt()
1246 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
[all …]
cxgb3_main.c
409 adap->sge.qs[qidx]. in request_msix_data_irqs()
412 &adap->sge.qs[qidx]); in request_msix_data_irqs()
416 &adap->sge.qs[qidx]); in request_msix_data_irqs()
436 &adapter->sge.qs[i]); in free_irq_resources()
446 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { in await_mgmt_replies()
459 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; in init_tp_parity()
595 struct sge_qset *qs = &adap->sge.qs[i]; in ring_dbs() local
597 if (qs->adap) in ring_dbs()
599 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); in ring_dbs()
608 struct sge_qset *qs = &adap->sge.qs[i]; in init_napi() local
[all …]
adapter.h
68 struct sge_qset *qs; member
216 struct sge_qset qs[SGE_QSETS]; member
325 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
381 struct queue_set *qs = nic->qs; in nicvf_refill_rbdr() local
382 int rbdr_idx = qs->rbdr_cnt; in nicvf_refill_rbdr()
394 rbdr = &qs->rbdr[rbdr_idx]; in nicvf_refill_rbdr()
403 if (qcount >= (qs->rbdr_len - 1)) in nicvf_refill_rbdr()
406 refill_rb_cnt = qs->rbdr_len - qcount - 1; in nicvf_refill_rbdr()
627 struct queue_set *qs, int qidx) in nicvf_reclaim_snd_queue() argument
639 struct queue_set *qs, int qidx) in nicvf_reclaim_rcv_queue() argument
649 struct queue_set *qs, int qidx) in nicvf_reclaim_cmp_queue() argument
744 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, in nicvf_rcv_queue_config() argument
751 rq = &qs->rq[qidx]; in nicvf_rcv_queue_config()
[all …]
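The nicvf_refill_rbdr() lines show how the driver tops up a receive-buffer descriptor ring: if the ring already holds rbdr_len - 1 buffers it does nothing, otherwise it refills rbdr_len - qcount - 1 entries, again keeping one descriptor free. A small sketch of that refill-count computation with illustrative numbers, not driver code:

    /* Sketch of the "top up, but keep one descriptor free" computation
     * visible in nicvf_refill_rbdr(); values below are illustrative. */
    #include <stdio.h>

    static unsigned int rbdr_refill_count(unsigned int ring_len, unsigned int qcount)
    {
        /* ring already holds all usable descriptors: nothing to do */
        if (qcount >= ring_len - 1)
            return 0;

        /* fill every slot except one so the ring never appears empty-as-full */
        return ring_len - qcount - 1;
    }

    int main(void)
    {
        printf("%u\n", rbdr_refill_count(8192, 100));     /* 8091 */
        printf("%u\n", rbdr_refill_count(8192, 8191));    /* 0, effectively full */
        return 0;
    }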
nicvf_ethtool.c
217 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_strings()
225 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_strings()
282 (nic->qs->rq_cnt + nic->qs->sq_cnt); in nicvf_get_sset_count()
290 (snic->qs->rq_cnt + snic->qs->sq_cnt); in nicvf_get_sset_count()
306 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_stats()
309 *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats) in nicvf_get_qset_stats()
313 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_stats()
316 *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats) in nicvf_get_qset_stats()
475 struct queue_set *qs = nic->qs; in nicvf_get_ringparam() local
478 ring->rx_pending = qs->cq_len; in nicvf_get_ringparam()
[all …]
nicvf_main.c
319 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; in nicvf_config_cpi()
441 nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS; in nicvf_request_sqs()
444 nic->snicvf[sqs]->qs->rq_cnt = rx_queues; in nicvf_request_sqs()
449 nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS; in nicvf_request_sqs()
452 nic->snicvf[sqs]->qs->sq_cnt = tx_queues; in nicvf_request_sqs()
456 nic->snicvf[sqs]->qs->cq_cnt = in nicvf_request_sqs()
457 max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt); in nicvf_request_sqs()
667 sq = &nic->qs->sq[cqe_tx->sq_idx]; in nicvf_snd_pkt_handler()
852 struct queue_set *qs = nic->qs; in nicvf_cq_intr_handler() local
853 struct cmp_queue *cq = &qs->cq[cq_idx]; in nicvf_cq_intr_handler()
[all …]
/linux/drivers/net/ethernet/intel/idpf/
idpf_virtchnl.c
743 qp = kzalloc_flex(*qp, qs, num); in idpf_alloc_queue_set()
819 static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs) in idpf_wait_for_marker_event_set() argument
825 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event_set()
826 switch (qs->qs[i].type) { in idpf_wait_for_marker_event_set()
828 txq = qs->qs[i].txq; in idpf_wait_for_marker_event_set()
858 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_wait_for_marker_event()
860 qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc, in idpf_wait_for_marker_event()
862 if (!qs) in idpf_wait_for_marker_event()
865 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event()
866 qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_wait_for_marker_event()
[all …]
idpf_txrx.c
964 const struct idpf_queue_set *qs) in idpf_init_queue_set() argument
969 splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model); in idpf_init_queue_set()
971 for (u32 i = 0; i < qs->num; i++) { in idpf_init_queue_set()
972 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_init_queue_set()
1037 static void idpf_clean_queue_set(const struct idpf_queue_set *qs) in idpf_clean_queue_set() argument
1039 const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc; in idpf_clean_queue_set()
1041 for (u32 i = 0; i < qs->num; i++) { in idpf_clean_queue_set()
1042 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_clean_queue_set()
1120 struct idpf_queue_set *qs; in idpf_vector_to_queue_set() local
1128 qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc, in idpf_vector_to_queue_set()
[all …]
idpf_virtchnl.h
146 struct idpf_queue_ptr qs[] __counted_by(num);
153 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
154 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
155 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
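The idpf results revolve around struct idpf_queue_set, which ends in a flexible array of queue pointers annotated with __counted_by(num); idpf_alloc_queue_set() sizes a single allocation for the header plus num entries, and the callers then fill in and iterate qs->qs[i]. A userspace-flavoured sketch of that "struct plus trailing counted array" pattern, with plain calloc() standing in for the kernel allocation helpers:

    /* Sketch of the flexible-array-member pattern behind struct
     * idpf_queue_set: one allocation for header plus `num` entries,
     * then walk them by index.  calloc() stands in for the kernel
     * helpers; types here are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>

    struct queue_ptr {
        unsigned int type;
        void *q;
    };

    struct queue_set {
        unsigned int num;
        struct queue_ptr qs[];    /* __counted_by(num) in the kernel header */
    };

    static struct queue_set *alloc_queue_set(unsigned int num)
    {
        struct queue_set *set;

        set = calloc(1, sizeof(*set) + num * sizeof(*set->qs));
        if (!set)
            return NULL;

        set->num = num;
        return set;
    }

    int main(void)
    {
        struct queue_set *set = alloc_queue_set(4);

        if (!set)
            return 1;

        for (unsigned int i = 0; i < set->num; i++)
            set->qs[i].type = i;    /* stand-in for VIRTCHNL2_QUEUE_TYPE_* values */

        printf("allocated %u queue pointers\n", set->num);
        free(set);
        return 0;
    }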
/linux/drivers/soc/qcom/
socinfo.c
862 struct qcom_socinfo *qs; in qcom_socinfo_probe() local
873 qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL); in qcom_socinfo_probe()
874 if (!qs) in qcom_socinfo_probe()
877 qs->attr.family = "Snapdragon"; in qcom_socinfo_probe()
878 qs->attr.machine = socinfo_machine(&pdev->dev, in qcom_socinfo_probe()
880 qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", in qcom_socinfo_probe()
882 qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", in qcom_socinfo_probe()
885 if (!qs->attr.soc_id || !qs->attr.revision) in qcom_socinfo_probe()
889 qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, in qcom_socinfo_probe()
892 if (!qs->attr.serial_number) in qcom_socinfo_probe()
[all …]
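The socinfo.c matches show the usual device-managed probe shape: allocate the driver state with devm_kzalloc(), format the attribute strings with devm_kasprintf(), and bail out with -ENOMEM if any of them failed, letting devm release everything on failure or unbind. A non-kernel sketch of the same shape follows; calloc()/asprintf() stand in for the devm_* helpers, so the error path has to free explicitly, and the numeric values are made up.

    /* Sketch of the "allocate state, format attribute strings, bail on any
     * failure" shape seen in qcom_socinfo_probe().  Userspace stand-ins only. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    struct soc_attr {
        const char *family;
        char *soc_id;
        char *revision;
    };

    static struct soc_attr *soc_attr_create(unsigned int id, unsigned int maj, unsigned int min)
    {
        struct soc_attr *attr = calloc(1, sizeof(*attr));

        if (!attr)
            return NULL;

        attr->family = "Snapdragon";
        if (asprintf(&attr->soc_id, "%u", id) < 0)
            attr->soc_id = NULL;
        if (asprintf(&attr->revision, "%u.%u", maj, min) < 0)
            attr->revision = NULL;

        if (!attr->soc_id || !attr->revision) {
            free(attr->soc_id);
            free(attr->revision);
            free(attr);
            return NULL;    /* devm would clean this up for us in the kernel */
        }
        return attr;
    }

    int main(void)
    {
        struct soc_attr *attr = soc_attr_create(123, 1, 0);    /* illustrative ids */

        if (!attr)
            return 1;
        printf("%s soc %s rev %s\n", attr->family, attr->soc_id, attr->revision);
        free(attr->soc_id);
        free(attr->revision);
        free(attr);
        return 0;
    }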
/linux/drivers/net/ethernet/chelsio/cxgb4vf/
cxgb4vf_main.c
367 int qs, msi; in name_msix_vecs() local
369 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { in name_msix_vecs()
371 "%s-%d", dev->name, qs); in name_msix_vecs()
634 int qs; in setup_sge_queues() local
636 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
644 netdev_get_tx_queue(dev, qs), in setup_sge_queues()
649 rxq->rspq.idx = qs; in setup_sge_queues()
665 int qs; in setup_sge_queues() local
667 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
707 int qs, err; in setup_rss() local
[all …]
/linux/fs/qnx4/
inode.c
47 struct qnx4_sb_info *qs; in qnx4_reconfigure() local
50 qs = qnx4_sb(sb); in qnx4_reconfigure()
51 qs->Version = QNX4_VERSION; in qnx4_reconfigure()
197 struct qnx4_sb_info *qs; in qnx4_fill_super() local
200 qs = kzalloc_obj(struct qnx4_sb_info); in qnx4_fill_super()
201 if (!qs) in qnx4_fill_super()
203 s->s_fs_info = qs; in qnx4_fill_super()
259 struct qnx4_sb_info *qs = qnx4_sb(sb); in qnx4_kill_sb() local
261 if (qs) { in qnx4_kill_sb()
262 kfree(qs->BitMap); in qnx4_kill_sb()
[all …]
/linux/drivers/net/ethernet/hisilicon/hns/
hns_ae_adapt.c
84 vf_cb = kzalloc_flex(*vf_cb, ae_handle.qs, qnum_per_vf); in hns_ae_get_handle()
111 ae_handle->qs[i] = &ring_pair_cb->q; in hns_ae_get_handle()
112 ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i]; in hns_ae_get_handle()
113 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; in hns_ae_get_handle()
143 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; in hns_ae_put_handle()
157 ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); in hns_ae_wait_flow_down()
189 hns_rcb_ring_enable_hw(handle->qs[i], val); in hns_ae_ring_enable_all()
315 q = handle->qs[i]; in hns_ae_set_mtu()
343 hns_rcb_int_clr_hw(handle->qs[k], in hns_ae_start()
346 hns_rcbv2_int_clr_hw(handle->qs[k], in hns_ae_start()
[all …]
hnae.c
298 hnae_fini_queue(handle->qs[i]); in hnae_reinit_handle()
304 ret = hnae_init_queue(handle, handle->qs[i], handle->dev); in hnae_reinit_handle()
311 hnae_fini_queue(handle->qs[j]); in hnae_reinit_handle()
350 ret = hnae_init_queue(handle, handle->qs[i], dev); in hnae_get_handle()
363 hnae_fini_queue(handle->qs[j]); in hnae_get_handle()
377 hnae_fini_queue(h->qs[i]); in hnae_put_handle()
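The hnae.c matches show the standard partial-initialisation rollback: queues are brought up in a loop, and if entry i fails, the entries that were already initialised are torn down again before the error is returned. A minimal sketch of that unwind-on-error loop, with generic init/fini stand-ins rather than the hnae API:

    /* Sketch of the unwind-on-failure loop seen around hnae_init_queue() /
     * hnae_fini_queue(): if queue i fails to initialise, tear down the
     * queues that were already set up before returning the error. */
    #include <stdio.h>

    #define NUM_QUEUES 4

    static int init_queue(int i)
    {
        return i == 2 ? -1 : 0;        /* pretend queue 2 fails */
    }

    static void fini_queue(int i)
    {
        printf("fini queue %d\n", i);
    }

    static int init_all_queues(void)
    {
        int i, ret;

        for (i = 0; i < NUM_QUEUES; i++) {
            ret = init_queue(i);
            if (ret)
                goto out_fini;
        }
        return 0;

    out_fini:
        while (--i >= 0)
            fini_queue(i);        /* unwind what was already initialised */
        return ret;
    }

    int main(void)
    {
        return init_all_queues() ? 1 : 0;
    }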
hns_dsaf_rcb.c
36 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) in hns_rcb_wait_fbd_clean() argument
45 fbd_num += dsaf_read_dev(qs[i], in hns_rcb_wait_fbd_clean()
48 fbd_num += dsaf_read_dev(qs[i], in hns_rcb_wait_fbd_clean()
57 dev_err(qs[i]->handle->owner_dev, in hns_rcb_wait_fbd_clean()
61 int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) in hns_rcb_wait_tx_ring_clean() argument
66 tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); in hns_rcb_wait_tx_ring_clean()
69 head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); in hns_rcb_wait_tx_ring_clean()
77 dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); in hns_rcb_wait_tx_ring_clean()
hns_dsaf_rcb.h
133 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
134 int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
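The hns_dsaf_rcb.c lines show hns_rcb_wait_tx_ring_clean() repeatedly reading the ring's HEAD register until it reaches the captured TAIL, and reporting a timeout if it never does; hns_rcb_wait_fbd_clean() polls outstanding frame buffer descriptors the same way. A small sketch of that poll-until-drained-or-timeout loop, with a fake register read and usleep() standing in for the hardware access and kernel delay helpers:

    /* Sketch of the poll loop behind hns_rcb_wait_tx_ring_clean(): re-read
     * the hardware head pointer until it reaches the tail, or give up after
     * a bounded number of polls.  read_head()/usleep() are stand-ins. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static unsigned int hw_head;    /* pretend hardware head pointer */

    static unsigned int read_head(void)
    {
        if (hw_head < 8)
            hw_head++;    /* hardware slowly consumes descriptors */
        return hw_head;
    }

    static bool wait_tx_ring_clean(unsigned int tail, unsigned int max_polls)
    {
        for (unsigned int i = 0; i < max_polls; i++) {
            if (read_head() == tail)
                return true;    /* ring drained */
            usleep(1000);       /* back off between polls */
        }
        fprintf(stderr, "wait timeout, head never reached tail\n");
        return false;
    }

    int main(void)
    {
        return wait_tx_ring_clean(8, 50) ? 0 : 1;
    }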
/linux/fs/qnx6/
inode.c
299 struct qnx6_sb_info *qs; in qnx6_fill_super() local
305 qs = kzalloc_obj(struct qnx6_sb_info); in qnx6_fill_super()
306 if (!qs) in qnx6_fill_super()
308 s->s_fs_info = qs; in qnx6_fill_super()
309 qs->s_mount_opt = ctx->s_mount_opts; in qnx6_fill_super()
317 if (qs->s_mount_opt == QNX6_MOUNT_MMI_FS) { in qnx6_fill_super()
469 kfree(qs); in qnx6_fill_super()
476 struct qnx6_sb_info *qs = QNX6_SB(sb); in qnx6_put_super() local
477 brelse(qs->sb_buf); in qnx6_put_super()
478 iput(qs->longfile); in qnx6_put_super()
[all …]
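Both the qnx4 and qnx6 results above follow the same superblock bookkeeping: fill_super() allocates the per-mount info structure and hangs it off sb->s_fs_info, and the teardown path (kill_sb or put_super) releases whatever that structure holds before freeing it. A generic sketch of that owner-pointer pattern outside the VFS, with plain structs and free() rather than the kernel API:

    /* Sketch of the s_fs_info pattern seen in the qnx4/qnx6 results: the
     * "superblock" owns an opaque per-filesystem info pointer that mount
     * allocates and unmount releases.  Types here are stand-ins. */
    #include <stdlib.h>

    struct fs_info {
        char *bitmap;        /* example resource owned by the fs info */
    };

    struct super_block {
        void *s_fs_info;
    };

    static int fill_super(struct super_block *sb)
    {
        struct fs_info *qs = calloc(1, sizeof(*qs));

        if (!qs)
            return -1;
        sb->s_fs_info = qs;
        return 0;
    }

    static void kill_sb(struct super_block *sb)
    {
        struct fs_info *qs = sb->s_fs_info;

        if (qs) {
            free(qs->bitmap);    /* release what the info owns first */
            free(qs);
            sb->s_fs_info = NULL;
        }
    }

    int main(void)
    {
        struct super_block sb = { 0 };

        if (fill_super(&sb))
            return 1;
        kill_sb(&sb);
        return 0;
    }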
/linux/drivers/gpu/nova-core/gsp/
fw.rs
63 pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { in gsp_write_ptr()
65 || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap() in gsp_write_ptr()
68 pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { in gsp_read_ptr()
70 || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() in gsp_read_ptr()
73 pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { in cpu_read_ptr()
75 || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() in cpu_read_ptr()
78 pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) { in advance_cpu_read_ptr()
79 let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES; in advance_cpu_read_ptr()
86 dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr); in advance_cpu_read_ptr()
92 pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { in cpu_write_ptr()
[all …]
/linux/drivers/scsi/elx/efct/
efct_hw_queues.c
199 struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT]; in efct_hw_new_cq_set() local
217 qs[i] = cq->queue; in efct_hw_new_cq_set()
222 if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) { in efct_hw_new_cq_set()
315 struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL }; in efct_hw_new_rq_set() local
342 qs[q_count] = rq->hdr; in efct_hw_new_rq_set()
349 qs[q_count + 1] = rq->data; in efct_hw_new_rq_set()
354 if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs, in efct_hw_new_rq_set()
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_ethtool.c
375 struct otx2_qset *qs = &pfvf->qset; in otx2_get_ringparam() local
378 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); in otx2_get_ringparam()
380 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); in otx2_get_ringparam()
395 struct otx2_qset *qs = &pfvf->qset; in otx2_set_ringparam() local
441 if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt && in otx2_set_ringparam()
449 qs->sqe_cnt = tx_count; in otx2_set_ringparam()
450 qs->rqe_cnt = rx_count; in otx2_set_ringparam()
/linux/net/sched/
sch_fq_codel.c
649 struct gnet_stats_queue qs = { 0 }; in fq_codel_dump_class_stats() local
676 qs.qlen++; in fq_codel_dump_class_stats()
681 qs.backlog = q->backlogs[idx]; in fq_codel_dump_class_stats()
682 qs.drops = 0; in fq_codel_dump_class_stats()
684 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) in fq_codel_dump_class_stats()
/linux/tools/testing/selftests/drivers/net/
stats.py
98 for qs in stats:
99 if qs["ifindex"]== test.ifindex:
100 return qs
