
// SPDX-License-Identifier: GPL-2.0-only
/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 */
	...
	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vport->num_rxq;
		...
	default:
		break;
	}
	...
	return -EOPNOTSUPP;
/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 */
	...
	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_key_size;
/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 */
	...
	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		return -EOPNOTSUPP;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	return user_config->rss_data.rss_lut_size;
/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 */
	...
	adapter = np->adapter;
	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rxfh->indir[i] = rss_data->rss_lut[i];
	}
/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
	...
	adapter = vport->adapter;
	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (rxfh->key)
		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

	if (rxfh->indir) {
		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
			rss_data->rss_lut[lut] = rxfh->indir[lut];
	}
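/*
 * Illustrative standalone sketch (not driver code): the indirection table
 * that idpf_set_rxfh() copies in above is just an array mapping hash
 * buckets to receive queues. A common default is a round-robin spread, as
 * modeled here; all names are hypothetical.
 */
#include <stdio.h>

static void fill_rss_lut(unsigned int *lut, unsigned int lut_size,
			 unsigned int num_queues)
{
	for (unsigned int i = 0; i < lut_size; i++)
		lut[i] = i % num_queues;	/* hash bucket i -> queue id */
}

int main(void)
{
	unsigned int lut[16];

	fill_rss_lut(lut, 16, 4);
	for (unsigned int i = 0; i < 16; i++)
		printf("bucket %u -> rxq %u\n", i, lut[i]);

	return 0;
}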
/**
 * idpf_get_channels - get the number of channels supported by the device
 *
 * Report maximum of TX and RX. Report one extra channel to match our MailBox
 * Queue.
 */
	...
	vport_config = np->adapter->vport_config[np->vport_idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
				 vport_config->max_q.max_rxq);
	ch->max_rx = vport_config->max_q.max_rxq;
	ch->max_tx = vport_config->max_q.max_txq;

	ch->max_other = IDPF_MAX_MBXQ;
	ch->other_count = IDPF_MAX_MBXQ;

	ch->combined_count = combined;
	ch->rx_count = num_rxq - combined;
	ch->tx_count = num_txq - combined;
/**
 * idpf_set_channels - set the new channel count
 */
	...
	idx = vport->idx;
	vport_config = vport->adapter->vport_config[idx];

	num_txq = vport_config->user_config.num_req_tx_qs;
	num_rxq = vport_config->user_config.num_req_rx_qs;

	combined = min(num_txq, num_rxq);

	/* These checks are for cases where the user didn't specify a
	 * particular value on the cmd line but we get a non-zero value anyway
	 * via get_channels(); look at ethtool.c in the ethtool repository
	 * (the user-space part), particularly the do_schannels() routine.
	 */
	if (ch->combined_count == combined)
		ch->combined_count = 0;
	if (ch->combined_count && ch->rx_count == num_rxq - combined)
		ch->rx_count = 0;
	if (ch->combined_count && ch->tx_count == num_txq - combined)
		ch->tx_count = 0;

	num_req_tx_q = ch->combined_count + ch->tx_count;
	num_req_rx_q = ch->combined_count + ch->rx_count;

	dev = &vport->adapter->pdev->dev;
	/* It's possible to specify a number of queues that exceeds the max.
	 * The stack checks the max combined_count and the max [tx|rx]_count,
	 * but not max combined_count + [tx|rx]_count, so these checks catch
	 * that case.
	 */
	if (num_req_tx_q > vport_config->max_q.max_txq) {
		dev_info(dev, "Maximum TX queues is %d\n",
			 vport_config->max_q.max_txq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	if (num_req_rx_q > vport_config->max_q.max_rxq) {
		dev_info(dev, "Maximum RX queues is %d\n",
			 vport_config->max_q.max_rxq);
		err = -EINVAL;
		goto unlock_mutex;
	}
	...
	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
	vport_config->user_config.num_req_rx_qs = num_req_rx_q;
	...
	/* On failure, roll back to the previously requested queue counts */
	vport_config->user_config.num_req_tx_qs = num_txq;
	vport_config->user_config.num_req_rx_qs = num_rxq;
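/*
 * Standalone model (assumption: it mirrors the folding done in
 * idpf_set_channels() above) of how ethtool's combined/rx/tx channel
 * counts become absolute queue totals. Hypothetical names.
 */
#include <stdio.h>

struct ch_req {
	unsigned int combined;
	unsigned int rx;
	unsigned int tx;
};

static void fold_channels(const struct ch_req *ch, unsigned int *num_tx,
			  unsigned int *num_rx)
{
	*num_tx = ch->combined + ch->tx;	/* num_req_tx_q */
	*num_rx = ch->combined + ch->rx;	/* num_req_rx_q */
}

int main(void)
{
	struct ch_req ch = { .combined = 8, .rx = 2, .tx = 0 };
	unsigned int tx, rx;

	fold_channels(&ch, &tx, &rx);
	printf("requested txq=%u rxq=%u\n", tx, rx);	/* txq=8 rxq=10 */

	return 0;
}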
/**
 * idpf_get_ringparam - Get ring parameters
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
	...
	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
	ring->rx_pending = vport->rxq_desc_count;
	ring->tx_pending = vport->txq_desc_count;

	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
/**
 * idpf_set_ringparam - Set ring parameters
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
	...
	idx = vport->idx;

	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
			   ring->tx_pending, IDPF_MIN_TXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
			   ring->rx_pending, IDPF_MIN_RXQ_DESC);
		err = -EINVAL;
		goto unlock_mutex;
	}

	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
			    new_rx_count);

	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
			    new_tx_count);

	if (new_tx_count == vport->txq_desc_count &&
	    new_rx_count == vport->rxq_desc_count)
		goto unlock_mutex;

	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		err = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	config_data = &vport->adapter->vport_config[idx]->user_config;
	config_data->num_req_txq_desc = new_tx_count;
	config_data->num_req_rxq_desc = new_rx_count;

	/* Since we adjusted the RX completion queue count, the RX buffer queue
	 * descriptor count needs to be adjusted as well.
	 */
	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
		vport->bufq_desc_count[i] =
			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
						vport->num_bufqs_per_qgrp);
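/*
 * Standalone model of the descriptor-count rounding above: the kernel's
 * ALIGN() rounds the request up to the hardware's required multiple. The
 * multiple used here is illustrative, not the driver's value.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int requested = 500, multiple = 32;

	printf("%u -> %u\n", requested, ALIGN_UP(requested, multiple));	/* 500 -> 512 */

	return 0;
}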
/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 */
/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
	IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
	IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
};
/* Stats associated with a vport */
static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};
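/*
 * Illustrative standalone model (an assumption, since the macro bodies are
 * elided above) of how IDPF_PORT_STAT/IDPF_QUEUE_STAT style tables are
 * typically built: each entry records a display name plus the size and
 * offset of a struct member so a generic copier can fetch it later.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_drops;
	uint64_t tx_drops;
};

struct demo_stat_desc {
	const char *stat_string;
	int sizeof_stat;
	int stat_offset;
};

#define DEMO_STAT(name, member) {					\
	.stat_string = name,						\
	.sizeof_stat = sizeof(((struct demo_stats *)0)->member),	\
	.stat_offset = offsetof(struct demo_stats, member) }

static const struct demo_stat_desc demo_descs[] = {
	DEMO_STAT("rx-drops", rx_drops),
	DEMO_STAT("tx-drops", tx_drops),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++)
		printf("%s: size %d, offset %d\n", demo_descs[i].stat_string,
		       demo_descs[i].sizeof_stat, demo_descs[i].stat_offset);

	return 0;
}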
/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 */
	...
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 */

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 */
/**
 * idpf_get_stat_strings - Get stat strings
 */
	...
	vport_config = np->adapter->vport_config[np->vport_idx];
	...
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);
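/*
 * Standalone demo of the queue stat naming produced by
 * __idpf_add_qstat_strings() above: the "%s_q-%u_%s" format yields strings
 * such as "tx_q-0_pkts" in ethtool -S output.
 */
#include <stdio.h>

int main(void)
{
	char name[64];

	snprintf(name, sizeof(name), "%s_q-%u_%s", "tx", 0, "pkts");
	puts(name);	/* prints: tx_q-0_pkts */

	return 0;
}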
/**
 * idpf_get_strings - Get string set
 */

/**
 * idpf_get_sset_count - Get length of string set
 */
	...
	if (sset != ETH_SS_STATS)
		return -EINVAL;

	vport_config = np->adapter->vport_config[np->vport_idx];
	...
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
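/*
 * Standalone model of the string-set sizing above: the reported count
 * covers the port stats plus per-queue stats for the *maximum* queue
 * counts, so the length never changes while the netdev exists. The
 * per-set counts below are illustrative, not the driver's values.
 */
#include <stdio.h>

static unsigned int sset_count(unsigned int port_stats,
			       unsigned int tx_stats_per_q,
			       unsigned int rx_stats_per_q,
			       unsigned int max_txq, unsigned int max_rxq)
{
	return port_stats + tx_stats_per_q * max_txq +
	       rx_stats_per_q * max_rxq;
}

int main(void)
{
	printf("%u\n", sset_count(15, 3, 4, 16, 16));	/* 15 + 48 + 64 = 127 */

	return 0;
}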
/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 */
	...
	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	...
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
	}
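/*
 * Standalone model of the size switch in idpf_add_one_ethtool_stat():
 * ethtool exports every stat as a u64, so narrower counters are widened
 * according to the recorded member size. Hypothetical names.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct demo {
	uint32_t small;
	uint64_t big;
};

static uint64_t widen_stat(const void *base, size_t offset, size_t size)
{
	const unsigned char *p = (const unsigned char *)base + offset;
	uint64_t v64;
	uint32_t v32;
	uint16_t v16;

	switch (size) {
	case sizeof(uint64_t):
		memcpy(&v64, p, sizeof(v64));
		return v64;
	case sizeof(uint32_t):
		memcpy(&v32, p, sizeof(v32));
		return v32;
	case sizeof(uint16_t):
		memcpy(&v16, p, sizeof(v16));
		return v16;
	case sizeof(uint8_t):
		return *p;
	default:
		return 0;	/* unexpected size: report zero */
	}
}

int main(void)
{
	struct demo d = { .small = 7, .big = 9 };

	printf("%llu %llu\n",
	       (unsigned long long)widen_stat(&d, offsetof(struct demo, small),
					      sizeof(d.small)),
	       (unsigned long long)widen_stat(&d, offsetof(struct demo, big),
					      sizeof(d.big)));

	return 0;
}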
/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 */
	...
	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
		...
	}

	do {
		start = u64_stats_fetch_begin(&q->stats_sync);
		...
	} while (u64_stats_fetch_retry(&q->stats_sync, start));
/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * ...
 * changed between those calls. This adds empty data to the stats since we
 * have no real values to report.
 */
/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 */
	...
	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		...
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
/**
 * idpf_collect_queue_stats - accumulate various per queue stats
 */
	struct idpf_port_stats *pstats = &vport->port_stats;
	unsigned int i, j;
	...
	/* Zero out the port stats first: they are accumulated below from the
	 * per-queue counters and exist only for reporting.
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(vport->rxq_model))
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (j = 0; j < num_rxq; j++) {
			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
			struct idpf_rx_queue_stats *stats;
			struct idpf_queue *rxq;
			unsigned int start;

			if (idpf_is_queue_model_split(vport->rxq_model))
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				continue;

			do {
				start = u64_stats_fetch_begin(&rxq->stats_sync);

				stats = &rxq->q_stats.rx;
				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
				hsplit = u64_stats_read(&stats->hsplit_pkts);
				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
				bad_descs = u64_stats_read(&stats->bad_descs);
			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
			u64_stats_add(&pstats->rx_hsplit, hsplit);
			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}

	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

		for (j = 0; j < txq_grp->num_txq; j++) {
			u64 linearize, qbusy, skb_drops, dma_map_errs;
			struct idpf_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			unsigned int start;

			if (!txq)
				continue;

			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				stats = &txq->q_stats.tx;
				linearize = u64_stats_read(&stats->linearize);
				qbusy = u64_stats_read(&stats->q_busy);
				skb_drops = u64_stats_read(&stats->skb_drops);
				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
			} while (u64_stats_fetch_retry(&txq->stats_sync, start));

			u64_stats_update_begin(&pstats->stats_sync);
			u64_stats_add(&pstats->tx_linearize, linearize);
			u64_stats_add(&pstats->tx_busy, qbusy);
			u64_stats_add(&pstats->tx_drops, skb_drops);
			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
			u64_stats_update_end(&pstats->stats_sync);
		}
	}
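/*
 * Minimal single-threaded model of the u64_stats_fetch_begin()/
 * u64_stats_fetch_retry() pattern used throughout the stats code above:
 * readers snapshot a sequence counter, copy the values, and retry if a
 * writer bumped the counter mid-copy. The real kernel primitives add
 * memory barriers and handle 32-bit SMP; this sketch shows the shape only.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_sync {
	unsigned int seq;	/* bumped by writers around updates */
	uint64_t pkts;
	uint64_t bytes;
};

static void demo_read(const struct demo_sync *s, uint64_t *pkts,
		      uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;		/* fetch_begin: snapshot sequence */
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (s->seq != start);	/* fetch_retry: writer interleaved? */
}

int main(void)
{
	struct demo_sync s = { .seq = 0, .pkts = 42, .bytes = 4200 };
	uint64_t pkts, bytes;

	demo_read(&s, &pkts, &bytes);
	printf("pkts=%llu bytes=%llu\n", (unsigned long long)pkts,
	       (unsigned long long)bytes);

	return 0;
}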
/**
 * idpf_get_ethtool_stats - report device statistics
 */
	...
	if (np->state != __IDPF_VPORT_UP) {
		idpf_vport_ctrl_unlock(netdev);
		return;
	}
	...
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
		...
		for (j = 0; j < txq_grp->num_txq; j++, total++) {
			struct idpf_queue *txq = txq_grp->txqs[j];
			...
		}
	}

	vport_config = vport->adapter->vport_config[vport->idx];

	/* Pad the report out to the maximum queue count so userspace always
	 * sees a constant number of stats.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);

	is_splitq = idpf_is_queue_model_split(vport->rxq_model);

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
		...
		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;
		...
			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			...
			page_pool_get_stats(rxq->pp, &pp_stats);
		...
	}

	/* In splitq mode the page pools sit on the buffer queues */
	for (i = 0; i < vport->num_rxq_grp; i++) {
		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
			struct idpf_queue *rxbufq =
				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;

			page_pool_get_stats(rxbufq->pp, &pp_stats);
		}
	}

	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
/**
 * idpf_find_rxq - find rxq from q index
 *
 * Returns pointer to rx queue.
 */
	...
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return vport->rxq_grps->singleq.rxqs[q_num];
	...
	return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;

/**
 * idpf_find_txq - find txq from q index
 *
 * Returns pointer to tx queue.
 */
	...
	if (!idpf_is_queue_model_split(vport->txq_model))
		return vport->txqs[q_num];
	...
	return vport->txq_grps[q_grp].complq;
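/*
 * Standalone model (assumption) of the split-queue index math elided in
 * idpf_find_rxq()/idpf_find_txq() above: a flat queue number decomposes
 * into a group and an index within that group by division and modulo on
 * the per-group queue count. The per-group count here is illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int per_group = 4, q_num = 10;
	unsigned int q_grp = q_num / per_group;
	unsigned int q_idx = q_num % per_group;

	printf("q %u -> group %u, idx %u\n", q_num, q_grp, q_idx);

	return 0;
}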
/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @q: queue of Rx or Tx
 */
	...
	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
			IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
			IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
	}
/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 */
	...
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
/**
 * idpf_get_coalesce - get ITR values as requested by user
 */

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 */
/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 */
	struct idpf_q_vector *qv = q->q_vector;
	...
	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}

	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(q->vport->netdev,
			   "Cannot set coalesce usecs if adaptive enabled\n");
		return -EINVAL;
	}
	...
	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(q->vport->netdev,
			   "Invalid value, %d-usecs range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);
		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(q->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->rx_itr_value, false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
		}
	}
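/*
 * Standalone model of the ITR rounding in __idpf_set_q_coalesce() above
 * (assumption: the interrupt throttle register works in 2-usec steps,
 * which is why an odd request is rounded down by one).
 */
#include <stdio.h>

int main(void)
{
	unsigned int usecs = 51;

	if (usecs % 2) {
		usecs--;	/* hardware accepts only even ITR values */
		printf("rounded ITR to %u usecs\n", usecs);
	}

	return 0;
}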
/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 */
	...
	if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
		return -EINVAL;
/**
 * idpf_set_coalesce - set ITR values as requested by user
 */
	...
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	for (i = 0; i < vport->num_txq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		err = idpf_set_q_coalesce(vport, ec, i, true);
		if (err)
			goto unlock_mutex;
	}
/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 */
/**
 * idpf_get_msglevel - Get debug message level
 */
	...
	return adapter->msg_enable;

/**
 * idpf_set_msglevel - Set debug message level
 */
	...
	adapter->msg_enable = data;
/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 */
	...
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (vport->link_up) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = vport->link_speed_mbps;
	} else {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}
/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 */
	netdev->ethtool_ops = &idpf_ethtool_ops;
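/*
 * Illustrative standalone model of the ops-table pattern that
 * idpf_set_ethtool_ops() relies on above: a const table of function
 * pointers is attached to an object once at init time, and callers
 * dispatch through it. All names here are hypothetical.
 */
#include <stdio.h>

struct demo_dev;

struct demo_ops {
	void (*show)(const struct demo_dev *dev);
};

struct demo_dev {
	const char *name;
	const struct demo_ops *ops;
};

static void demo_show(const struct demo_dev *dev)
{
	printf("dev %s\n", dev->name);
}

static const struct demo_ops demo_dev_ops = {
	.show = demo_show,
};

int main(void)
{
	struct demo_dev dev = { .name = "eth0" };

	dev.ops = &demo_dev_ops;	/* like netdev->ethtool_ops = &idpf_ethtool_ops */
	dev.ops->show(&dev);

	return 0;
}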