Lines matching refs:apc in mana_en.c (MANA Ethernet driver); the number before each match is the file line number.

51 static bool mana_en_need_log(struct mana_port_context *apc, int err)
53 if (apc && apc->ac && apc->ac->gdma_dev &&
54 apc->ac->gdma_dev->gdma_context)
55 return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
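
The first hits are the helper mana_en_need_log(), which walks apc->ac->gdma_dev->gdma_context before deciding whether an error is worth logging, delegating the actual decision to mana_need_log(). A small standalone sketch of that guard, with stand-in types; the fallback return value is an assumption, since the matched lines do not show the else branch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in structures: only the pointer chain from the matched lines matters. */
    struct gdma_ctx { int unused; };
    struct gdma_dev { struct gdma_ctx *gdma_context; };
    struct mana_ctx { struct gdma_dev *gdma_dev; };
    struct port_ctx { struct mana_ctx *ac; };

    /* Assumed policy helper standing in for mana_need_log(). */
    static bool need_log(struct gdma_ctx *gc, int err)
    {
        (void)gc;
        return err != 0;
    }

    /* Same shape as the guard in mana_en_need_log(): only consult the
     * context-level policy when every link in the chain exists; defaulting
     * to "log it" otherwise is an assumption for this sketch.
     */
    static bool en_need_log(struct port_ctx *apc, int err)
    {
        if (apc && apc->ac && apc->ac->gdma_dev && apc->ac->gdma_dev->gdma_context)
            return need_log(apc->ac->gdma_dev->gdma_context, err);
        return true;
    }

    int main(void)
    {
        printf("%d\n", en_need_log(NULL, -5));  /* chain incomplete -> 1 */
        return 0;
    }
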
64 struct mana_port_context *apc = netdev_priv(ndev);
73 apc->port_is_up = true;
86 struct mana_port_context *apc = netdev_priv(ndev);
88 if (!apc->port_is_up)
134 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
139 struct gdma_dev *gd = apc->ac->gdma_dev;
193 netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
255 struct mana_port_context *apc = netdev_priv(ndev);
258 struct gdma_dev *gd = apc->ac->gdma_dev;
269 if (unlikely(!apc->port_is_up))
278 txq = &apc->tx_qp[txq_idx].txq;
280 cq = &apc->tx_qp[txq_idx].tx_cq;
410 if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
427 apc->eth_stats.stop_queue++;
457 apc->eth_stats.wake_queue++;
475 struct mana_port_context *apc = netdev_priv(ndev);
476 unsigned int num_queues = apc->num_queues;
483 if (!apc->port_is_up)
489 rx_stats = &apc->rxqs[q]->stats;
502 tx_stats = &apc->tx_qp[q].txq.stats;
518 struct mana_port_context *apc = netdev_priv(ndev);
523 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
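
Line 523 selects the TX queue by masking the flow hash into the RSS indirection table; the masking assumes indir_table_sz is a power of two. A small runnable sketch of that lookup (table size, contents, and the hash value are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative indirection table: power-of-two size, entries are queue ids. */
        const uint32_t indir_table_sz = 8;
        const uint32_t indir_table[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };

        uint32_t hash = 0xdeadbeefu;    /* e.g. a flow hash computed from the skb */

        /* hash & (size - 1) is the cheap equivalent of hash % size for power-of-two sizes. */
        uint32_t txq = indir_table[hash & (indir_table_sz - 1)];

        printf("hash 0x%08x -> queue %u\n", hash, txq);
        return 0;
    }
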
740 struct mana_port_context *apc = netdev_priv(binding->netdev);
749 if (apc->handle.id && shaper->handle.id != apc->handle.id) {
763 err = mana_query_link_cfg(apc);
764 old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
767 err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
768 apc->speed = (err) ? old_speed : rate;
769 apc->handle = (err) ? apc->handle : shaper->handle;
779 struct mana_port_context *apc = netdev_priv(binding->netdev);
782 err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
786 apc->handle.id = 0;
787 apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
788 apc->speed = 0;
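
The shaper callbacks around file lines 740-788 follow an apply-or-keep pattern: remember the previously known speed, attempt the bandwidth clamp, and on failure fall back to the old speed and handle via ternaries; deleting the shaper clears the clamp and resets handle and speed. A hedged user-space sketch of the same rollback idiom (set_clamp() and the field names are placeholders, not the driver API):

    #include <stdio.h>

    struct port {
        unsigned int speed;      /* currently applied rate */
        unsigned int handle_id;  /* shaper handle, 0 = none */
    };

    /* Placeholder for the clamp call; returns 0 on success, negative on error. */
    static int set_clamp(struct port *p, unsigned int rate)
    {
        (void)p;
        return rate > 100000 ? -1 : 0;   /* pretend rates above 100000 fail */
    }

    static int shaper_set(struct port *p, unsigned int rate, unsigned int handle_id)
    {
        unsigned int old_speed = p->speed;
        int err = set_clamp(p, rate);

        /* keep the old state on failure, adopt the new one on success */
        p->speed = err ? old_speed : rate;
        p->handle_id = err ? p->handle_id : handle_id;
        return err;
    }

    int main(void)
    {
        struct port p = { .speed = 50000, .handle_id = 0 };
        printf("set 40000:  err=%d speed=%u\n", shaper_set(&p, 40000, 7), p.speed);
        printf("set 200000: err=%d speed=%u\n", shaper_set(&p, 200000, 8), p.speed);
        return 0;
    }
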
821 static void mana_cleanup_port_context(struct mana_port_context *apc)
827 debugfs_remove(apc->mana_port_debugfs);
828 apc->mana_port_debugfs = NULL;
829 kfree(apc->rxqs);
830 apc->rxqs = NULL;
833 static void mana_cleanup_indir_table(struct mana_port_context *apc)
835 apc->indir_table_sz = 0;
836 kfree(apc->indir_table);
837 kfree(apc->rxobj_table);
840 static int mana_init_port_context(struct mana_port_context *apc)
842 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
845 return !apc->rxqs ? -ENOMEM : 0;
901 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
913 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
916 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
923 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
928 apc->port_handle = resp.hw_vport_handle;
932 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
940 req.hw_vport_handle = apc->port_handle;
942 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
945 if (mana_en_need_log(apc, err))
946 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
955 netdev_err(apc->ndev,
960 static int mana_pf_register_filter(struct mana_port_context *apc)
968 req.vport = apc->port_handle;
969 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
971 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
974 netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
981 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
986 apc->pf_filter_handle = resp.filter_handle;
990 static void mana_pf_deregister_filter(struct mana_port_context *apc)
998 req.filter_handle = apc->pf_filter_handle;
1000 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1003 if (mana_en_need_log(apc, err))
1004 netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
1013 netdev_err(apc->ndev,
1070 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
1082 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1102 netdev_warn(apc->ndev,
1104 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
1108 apc->port_handle = resp.vport;
1109 ether_addr_copy(apc->mac_addr, resp.mac_addr);
1114 void mana_uncfg_vport(struct mana_port_context *apc)
1116 mutex_lock(&apc->vport_mutex);
1117 apc->vport_use_count--;
1118 WARN_ON(apc->vport_use_count < 0);
1119 mutex_unlock(&apc->vport_mutex);
1123 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
1148 mutex_lock(&apc->vport_mutex);
1149 if (apc->vport_use_count > 0) {
1150 mutex_unlock(&apc->vport_mutex);
1153 apc->vport_use_count++;
1154 mutex_unlock(&apc->vport_mutex);
1158 req.vport = apc->port_handle;
1162 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1165 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1172 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1180 apc->tx_shortform_allowed = resp.short_form_allowed;
1181 apc->tx_vp_offset = resp.tx_vport_offset;
1183 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1184 apc->port_handle, protection_dom_id, doorbell_pg_id);
1187 mana_uncfg_vport(apc);
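
The mana_cfg_vport()/mana_uncfg_vport() hits (file lines 1114-1187) show a use count guarded by vport_mutex: configuration only proceeds when the count is zero (the matched lines show the early unlock when it is already held), and teardown decrements it with a WARN_ON against going negative. A minimal pthread-based sketch of that acquire-if-free pattern (structure and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct vport {
        pthread_mutex_t lock;
        int use_count;
    };

    /* Try to become the single user of the vport; fail if someone already holds it. */
    static int vport_acquire(struct vport *v)
    {
        pthread_mutex_lock(&v->lock);
        if (v->use_count > 0) {
            pthread_mutex_unlock(&v->lock);
            return -1;               /* already in use */
        }
        v->use_count++;
        pthread_mutex_unlock(&v->lock);
        return 0;
    }

    static void vport_release(struct vport *v)
    {
        pthread_mutex_lock(&v->lock);
        v->use_count--;              /* the driver WARNs if this ever goes negative */
        pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
        struct vport v = { PTHREAD_MUTEX_INITIALIZER, 0 };
        printf("first acquire:  %d\n", vport_acquire(&v));   /* 0  */
        printf("second acquire: %d\n", vport_acquire(&v));   /* -1 */
        vport_release(&v);
        return 0;
    }
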
1193 static int mana_cfg_vport_steering(struct mana_port_context *apc,
1200 struct net_device *ndev = apc->ndev;
1204 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
1214 req->vport = apc->port_handle;
1215 req->num_indir_entries = apc->indir_table_sz;
1219 req->rss_enable = apc->rss_state;
1223 req->default_rxobj = apc->default_rxobj;
1227 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1230 memcpy(req->indir_tab, apc->rxobj_table,
1233 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1236 if (mana_en_need_log(apc, err))
1256 apc->port_handle, apc->indir_table_sz);
1262 int mana_query_link_cfg(struct mana_port_context *apc)
1264 struct net_device *ndev = apc->ndev;
1272 req.vport = apc->port_handle;
1275 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1302 apc->speed = resp.link_speed_mbps;
1303 apc->max_speed = resp.qos_speed_mbps;
1307 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
1312 struct net_device *ndev = apc->ndev;
1317 req.vport = apc->port_handle;
1321 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1351 int mana_create_wq_obj(struct mana_port_context *apc,
1359 struct net_device *ndev = apc->ndev;
1373 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1406 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1411 struct net_device *ndev = apc->ndev;
1419 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1422 if (mana_en_need_log(apc, err))
1510 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1522 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1525 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1532 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1541 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1549 static void mana_fence_rqs(struct mana_port_context *apc)
1555 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1556 rxq = apc->rxqs[rxq_idx];
1557 err = mana_fence_rq(apc, rxq);
1580 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1583 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1606 struct mana_port_context *apc;
1617 apc = netdev_priv(ndev);
1653 apc->eth_stats.tx_cqe_err++;
1664 apc->eth_stats.tx_cqe_unknown_type++;
1678 mana_unmap_skb(skb, apc);
1699 /* Ensure checking txq_stopped before apc->port_is_up. */
1702 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1704 apc->eth_stats.wake_queue++;
1923 struct mana_port_context *apc;
1929 apc = netdev_priv(ndev);
1943 apc->eth_stats.rx_coalesced_err++;
1953 apc->eth_stats.rx_cqe_unknown_type++;
2068 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
2070 struct gdma_dev *gd = apc->ac->gdma_dev;
2078 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
2080 struct gdma_dev *gd = apc->ac->gdma_dev;
2088 static void mana_destroy_txq(struct mana_port_context *apc)
2093 if (!apc->tx_qp)
2096 for (i = 0; i < apc->num_queues; i++) {
2097 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
2098 apc->tx_qp[i].mana_tx_debugfs = NULL;
2100 napi = &apc->tx_qp[i].tx_cq.napi;
2101 if (apc->tx_qp[i].txq.napi_initialized) {
2107 apc->tx_qp[i].txq.napi_initialized = false;
2109 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2111 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2113 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2116 kfree(apc->tx_qp);
2117 apc->tx_qp = NULL;
2120 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
2122 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
2126 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2145 static int mana_create_txq(struct mana_port_context *apc,
2148 struct mana_context *ac = apc->ac;
2161 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
2163 if (!apc->tx_qp)
2167 * apc->tx_queue_size represents the maximum number of WQEs
2171 * as min val of apc->tx_queue_size is 128 and that would make
2172 * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size
2175 txq_size = apc->tx_queue_size * 32;
2177 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
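
The comment fragments around file lines 2167-2177 explain the TX ring sizing: txq_size = apc->tx_queue_size * 32 (32 bytes per WQE slot), so the minimum queue size of 128 already gives a 4096-byte, page-aligned ring, and the completion queue is sized as tx_queue_size * COMP_ENTRY_SIZE. A tiny arithmetic sketch of that sizing; the 64-byte COMP_ENTRY_SIZE here is an assumption for illustration:

    #include <stdio.h>

    #define COMP_ENTRY_SIZE 64              /* assumed value for illustration */

    int main(void)
    {
        unsigned int tx_queue_size = 128;            /* minimum mentioned in the comment */
        unsigned int txq_size = tx_queue_size * 32;  /* 128 * 32 = 4096 bytes */
        unsigned int cq_size  = tx_queue_size * COMP_ENTRY_SIZE;

        printf("txq_size = %u bytes, cq_size = %u bytes\n", txq_size, cq_size);
        return 0;
    }
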
2181 for (i = 0; i < apc->num_queues; i++) {
2182 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2185 txq = &apc->tx_qp[i].txq;
2190 txq->vp_offset = apc->tx_vp_offset;
2203 cq = &apc->tx_qp[i].tx_cq;
2230 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2232 &apc->tx_qp[i].tx_object);
2256 mana_create_txq_debugfs(apc, i);
2271 apc->num_queues, err);
2272 mana_destroy_txq(apc);
2276 static void mana_destroy_rxq(struct mana_port_context *apc,
2280 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2305 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2307 mana_deinit_cq(apc, &rxq->rx_cq);
2368 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2371 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2388 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2449 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2453 struct gdma_dev *gd = apc->ac->gdma_dev;
2465 rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
2471 rxq->num_rx_buf = apc->rx_queue_size;
2485 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2527 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2572 mana_destroy_rxq(apc, rxq, false);
2575 mana_deinit_cq(apc, cq);
2580 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2585 rxq = apc->rxqs[idx];
2588 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2602 static int mana_add_rx_queues(struct mana_port_context *apc,
2605 struct mana_context *ac = apc->ac;
2610 for (i = 0; i < apc->num_queues; i++) {
2611 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2620 apc->rxqs[i] = rxq;
2622 mana_create_rxq_debugfs(apc, i);
2625 apc->default_rxobj = apc->rxqs[0]->rxobj;
2630 static void mana_destroy_vport(struct mana_port_context *apc)
2632 struct gdma_dev *gd = apc->ac->gdma_dev;
2636 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2637 rxq = apc->rxqs[rxq_idx];
2641 mana_destroy_rxq(apc, rxq, true);
2642 apc->rxqs[rxq_idx] = NULL;
2645 mana_destroy_txq(apc);
2646 mana_uncfg_vport(apc);
2648 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
2649 mana_pf_deregister_hw_vport(apc);
2652 static int mana_create_vport(struct mana_port_context *apc,
2655 struct gdma_dev *gd = apc->ac->gdma_dev;
2658 apc->default_rxobj = INVALID_MANA_HANDLE;
2660 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
2661 err = mana_pf_register_hw_vport(apc);
2666 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2670 return mana_create_txq(apc, net);
2673 static int mana_rss_table_alloc(struct mana_port_context *apc)
2675 if (!apc->indir_table_sz) {
2676 netdev_err(apc->ndev,
2678 apc->port_idx);
2682 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2683 if (!apc->indir_table)
2686 apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
2687 if (!apc->rxobj_table) {
2688 kfree(apc->indir_table);
2695 static void mana_rss_table_init(struct mana_port_context *apc)
2699 for (i = 0; i < apc->indir_table_sz; i++)
2700 apc->indir_table[i] =
2701 ethtool_rxfh_indir_default(i, apc->num_queues);
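
mana_rss_table_init() at file lines 2699-2701 fills the indirection table with ethtool_rxfh_indir_default(), which spreads table slots round-robin across the active queues. A standalone sketch of that default spread; the modulo fill mirrors what the ethtool helper does, but verify against the kernel header if exactness matters:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors ethtool_rxfh_indir_default(): round-robin spread of slots over queues. */
    static uint32_t rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
    {
        return index % n_rx_rings;
    }

    int main(void)
    {
        const uint32_t indir_table_sz = 16;   /* illustrative; the real size is queried from hw */
        const uint32_t num_queues = 6;
        uint32_t indir_table[16];

        for (uint32_t i = 0; i < indir_table_sz; i++)
            indir_table[i] = rxfh_indir_default(i, num_queues);

        for (uint32_t i = 0; i < indir_table_sz; i++)
            printf("%u ", indir_table[i]);    /* 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 */
        printf("\n");
        return 0;
    }
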
2704 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2712 for (i = 0; i < apc->indir_table_sz; i++) {
2713 queue_idx = apc->indir_table[i];
2714 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2718 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2722 mana_fence_rqs(apc);
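
mana_config_rss() (file lines 2704-2722) translates the queue-index indirection table into a table of RX object handles before programming vPort steering, and fences the RQs when steering is turned off. A small sketch of that index-to-handle translation step (types and handle values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t handle_t;               /* stand-in for mana_handle_t */

    struct rxq { handle_t rxobj; };

    int main(void)
    {
        /* Illustrative setup: 4 RX queues, an 8-entry indirection table of queue indices. */
        struct rxq rxqs[4] = { { 0x100 }, { 0x101 }, { 0x102 }, { 0x103 } };
        uint32_t indir_table[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
        handle_t rxobj_table[8];

        /* Translate each queue index into the RX object handle the hardware steers to. */
        for (unsigned int i = 0; i < 8; i++)
            rxobj_table[i] = rxqs[indir_table[i]].rxobj;

        for (unsigned int i = 0; i < 8; i++)
            printf("slot %u -> rxobj 0x%llx\n", i, (unsigned long long)rxobj_table[i]);
        return 0;
    }
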
2727 void mana_query_gf_stats(struct mana_port_context *apc)
2731 struct net_device *ndev = apc->ndev;
2765 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2779 apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2780 apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2781 apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
2782 apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2783 apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2784 apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2785 apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2786 apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2787 apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2788 apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2789 apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2790 apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
2792 apc->eth_stats.hc_tx_err_vlan_enforcement =
2794 apc->eth_stats.hc_tx_err_eth_type_enforcement =
2796 apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2797 apc->eth_stats.hc_tx_err_sqpdid_enforcement =
2799 apc->eth_stats.hc_tx_err_cqpdid_enforcement =
2801 apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2802 apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2803 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2804 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2805 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2806 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2807 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2808 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2809 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2810 apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
2813 void mana_query_phy_stats(struct mana_port_context *apc)
2817 struct net_device *ndev = apc->ndev;
2822 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2837 apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
2838 apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
2841 apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
2842 apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
2843 apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
2844 apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
2845 apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
2846 apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
2847 apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
2848 apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
2849 apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
2850 apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
2851 apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
2852 apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
2853 apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
2854 apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
2855 apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
2856 apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
2859 apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
2860 apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
2861 apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
2862 apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
2863 apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
2864 apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
2865 apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
2866 apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
2867 apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
2868 apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
2869 apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
2870 apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
2871 apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
2872 apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
2873 apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
2874 apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
2877 apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
2878 apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
2879 apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
2880 apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
2881 apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
2882 apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
2883 apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
2884 apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
2885 apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
2886 apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
2887 apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
2888 apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
2889 apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
2890 apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
2891 apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
2892 apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
2897 struct mana_port_context *apc = netdev_priv(ndev);
2898 struct gdma_dev *gd = apc->ac->gdma_dev;
2900 int port_idx = apc->port_idx;
2905 err = mana_init_port_context(apc);
2911 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2912 &apc->indir_table_sz);
2920 if (apc->max_queues > max_queues)
2921 apc->max_queues = max_queues;
2923 if (apc->num_queues > apc->max_queues)
2924 apc->num_queues = apc->max_queues;
2926 eth_hw_addr_set(ndev, apc->mac_addr);
2928 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
2932 mana_cleanup_port_context(apc);
2938 struct mana_port_context *apc = netdev_priv(ndev);
2939 struct gdma_dev *gd = apc->ac->gdma_dev;
2942 err = mana_create_vport(apc, ndev);
2944 netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
2948 err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2952 apc->num_queues, err);
2956 err = mana_add_rx_queues(apc, ndev);
2960 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2962 err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2966 apc->num_queues, err);
2970 mana_rss_table_init(apc);
2972 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2978 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
2979 err = mana_pf_register_filter(apc);
2984 mana_chn_setxdp(apc, mana_xdp_get(apc));
2989 mana_destroy_vport(apc);
2995 struct mana_port_context *apc = netdev_priv(ndev);
3004 if (apc->port_st_save) {
3007 mana_cleanup_port_context(apc);
3012 apc->port_is_up = apc->port_st_save;
3017 if (apc->port_is_up)
3027 struct mana_port_context *apc = netdev_priv(ndev);
3029 struct gdma_dev *gd = apc->ac->gdma_dev;
3035 if (apc->port_is_up)
3038 mana_chn_setxdp(apc, NULL);
3040 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
3041 mana_pf_deregister_filter(apc);
3043 /* No packet can be transmitted now since apc->port_is_up is false.
3045 * a txq because it may not timely see apc->port_is_up being cleared
3047 * new packets due to apc->port_is_up being false.
3056 for (i = 0; i < apc->num_queues; i++) {
3057 txq = &apc->tx_qp[i].txq;
3075 for (i = 0; i < apc->num_queues; i++) {
3076 txq = &apc->tx_qp[i].txq;
3078 mana_unmap_skb(skb, apc);
3087 apc->rss_state = TRI_STATE_FALSE;
3088 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
3089 if (err && mana_en_need_log(apc, err))
3093 mana_destroy_vport(apc);
3100 struct mana_port_context *apc = netdev_priv(ndev);
3105 apc->port_st_save = apc->port_is_up;
3106 apc->port_is_up = false;
3114 if (apc->port_st_save) {
3124 mana_cleanup_port_context(apc);
3134 struct mana_port_context *apc;
3145 apc = netdev_priv(ndev);
3146 apc->ac = ac;
3147 apc->ndev = ndev;
3148 apc->max_queues = gc->max_num_queues;
3149 apc->num_queues = gc->max_num_queues;
3150 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
3151 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
3152 apc->port_handle = INVALID_MANA_HANDLE;
3153 apc->pf_filter_handle = INVALID_MANA_HANDLE;
3154 apc->port_idx = port_idx;
3156 mutex_init(&apc->vport_mutex);
3157 apc->vport_use_count = 0;
3172 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
3178 err = mana_rss_table_alloc(apc);
3201 debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
3206 mana_cleanup_indir_table(apc);
3208 mana_cleanup_port_context(apc);
3452 struct mana_port_context *apc;
3464 apc = netdev_priv(ndev);
3488 mana_cleanup_indir_table(apc);