Lines matching defs: fbn (struct fbnic_net *)

55  * @fbn: netdev priv of the FB NIC
63 static __maybe_unused u64 fbnic_ts40_to_ns(struct fbnic_net *fbn, u64 ts40)
72 s = u64_stats_fetch_begin(&fbn->time_seq);
73 offset = READ_ONCE(fbn->time_offset);
74 } while (u64_stats_fetch_retry(&fbn->time_seq, s));
76 high = READ_ONCE(fbn->time_high);
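
The lines above, from fbnic_ts40_to_ns(), read a driver-maintained time offset under a u64_stats seqcount (so a concurrent clock adjustment cannot tear the read) together with a periodically refreshed high word of the hardware clock, and use them to widen the 40-bit timestamps reported by the hardware into a full 64-bit nanosecond value. As a hedged illustration of the widening step only, here is a standalone C model that assumes the timestamp is never older than the reference sample; the bit windows and rollover tolerance in the real helper differ.

/*
 * Standalone model of extending a 40-bit hardware timestamp to 64 bits
 * using a recently sampled full reading of the same clock.  Hypothetical
 * helper, not the driver's code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ts40_extend(uint64_t ref64, uint64_t ts40)
{
	const uint64_t low_mask = (1ULL << 40) - 1;
	uint64_t ns = (ref64 & ~low_mask) | (ts40 & low_mask);

	/*
	 * The timestamp was captured no earlier than the reference sample;
	 * if its low 40 bits are smaller, the 40-bit counter wrapped in
	 * between and the value belongs to the next 2^40 epoch.
	 */
	if ((ts40 & low_mask) < (ref64 & low_mask))
		ns += 1ULL << 40;

	return ns;
}

int main(void)
{
	/* Reference near the top of an epoch, timestamp just after the wrap. */
	printf("0x%llx\n", (unsigned long long)ts40_extend(0x12ffffffff00ULL, 0x80ULL));
	return 0;
}
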
167 struct fbnic_net *fbn;
172 fbn = netdev_priv(skb->dev);
173 if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF)
438 struct fbnic_net *fbn = netdev_priv(dev);
441 return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
614 struct fbnic_net *fbn;
638 fbn = netdev_priv(nv->napi.dev);
639 ns = fbnic_ts40_to_ns(fbn, FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
1012 struct fbnic_net *fbn;
1018 fbn = netdev_priv(nv->napi.dev);
1020 ns = fbnic_ts40_to_ns(fbn, ts);
1211 void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
1217 fbn->rx_stats.bytes += stats->bytes;
1218 fbn->rx_stats.packets += stats->packets;
1219 fbn->rx_stats.dropped += stats->dropped;
1220 fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
1221 fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
1222 fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
1224 BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
1227 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
1233 fbn->tx_stats.bytes += stats->bytes;
1234 fbn->tx_stats.packets += stats->packets;
1235 fbn->tx_stats.dropped += stats->dropped;
1236 fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
1237 fbn->tx_stats.twq.lso += stats->twq.lso;
1238 fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
1239 fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
1240 fbn->tx_stats.twq.stop += stats->twq.stop;
1241 fbn->tx_stats.twq.wake += stats->twq.wake;
1243 BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
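
The aggregation helpers above close with BUILD_BUG_ON(sizeof(...)/8 != N) checks (lines 1224 and 1243): if a new u64 counter is added to the per-ring rx or twq stats structs, the build fails until the copy loops above are extended. A minimal standalone model of the same compile-time guard, using C11 _Static_assert in place of the kernel's BUILD_BUG_ON; the struct layout is inferred from the field names in the listed lines.

#include <stdint.h>

struct twq_stats {			/* six u64 counters, as in the lines above */
	uint64_t csum_partial;
	uint64_t lso;
	uint64_t ts_lost;
	uint64_t ts_packets;
	uint64_t stop;
	uint64_t wake;
};

static void aggregate_twq(struct twq_stats *dst, const struct twq_stats *src)
{
	dst->csum_partial += src->csum_partial;
	dst->lso          += src->lso;
	dst->ts_lost      += src->ts_lost;
	dst->ts_packets   += src->ts_packets;
	dst->stop         += src->stop;
	dst->wake         += src->wake;

	/* A seventh u64 in twq_stats breaks the build until this is updated. */
	_Static_assert(sizeof(struct twq_stats) / 8 == 6, "update aggregate_twq()");
}
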
1246 static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
1252 fbnic_aggregate_ring_tx_counters(fbn, txr);
1255 WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
1256 fbn->tx[txr->q_idx] = NULL;
1259 static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
1265 fbnic_aggregate_ring_rx_counters(fbn, rxr);
1268 WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
1269 fbn->rx[rxr->q_idx] = NULL;
1272 static void fbnic_free_napi_vector(struct fbnic_net *fbn,
1279 fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
1280 fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
1284 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
1285 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
1286 fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
1292 fbn->napi[fbnic_napi_idx(nv)] = NULL;
1296 void fbnic_free_napi_vectors(struct fbnic_net *fbn)
1300 for (i = 0; i < fbn->num_napi; i++)
1301 if (fbn->napi[i])
1302 fbnic_free_napi_vector(fbn, fbn->napi[i]);
1308 static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
1314 .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
1321 .netdev = fbn->netdev,
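
Line 1314 sizes the vector's page pool to cover both the header (hpq) and payload (ppq) buffer descriptor queues of every RX triad it owns, and line 1321 ties the pool to the netdev. A hedged sketch of creating such a pool with the core page_pool API; the flag choices, DMA direction, max_len, and helper name are illustrative assumptions, not the driver's values.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

/* Hypothetical helper; hpq_entries/ppq_entries/rxt_count stand in for the
 * driver's ring sizes and per-vector RX triad count.
 */
static struct page_pool *example_create_pool(struct net_device *netdev,
					     struct device *dev,
					     unsigned int hpq_entries,
					     unsigned int ppq_entries,
					     unsigned int rxt_count)
{
	struct page_pool_params pp = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= (hpq_entries + ppq_entries) * rxt_count,
		.dev		= dev,
		.netdev		= netdev,
		.dma_dir	= DMA_BIDIRECTIONAL,
		.max_len	= PAGE_SIZE,
	};

	return page_pool_create(&pp);	/* returns ERR_PTR() on failure */
}
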
1355 static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
1389 fbn->napi[fbnic_napi_idx(nv)] = nv;
1390 netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
1401 err = fbnic_alloc_nv_page_pool(fbn, nv);
1423 fbn->tx[txq_idx] = &qt->sub0;
1454 fbn->rx[rxq_idx] = &qt->cmpl;
1470 fbn->napi[fbnic_napi_idx(nv)] = NULL;
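
Lines 1389-1454 show the allocation path registering the vector's NAPI context with the stack and then publishing the triad's rings in the fbn->tx[] / fbn->rx[] lookup tables that the transmit path (line 441) and the teardown WARN_ONs (lines 1255 and 1268) rely on. A simplified, hedged sketch of that wiring; the struct shapes and helper names are invented and much smaller than the real ones.

#include <linux/netdevice.h>

struct example_ring { unsigned int q_idx; };
struct example_qt { struct example_ring sub0, sub1, cmpl; };
struct example_vector {
	struct napi_struct napi;
	struct example_qt qt[4];
};

static int example_poll(struct napi_struct *napi, int budget)
{
	napi_complete_done(napi, 0);
	return 0;
}

static void example_wire_vector(struct net_device *netdev,
				struct example_vector *nv,
				struct example_ring **tx_tbl,
				unsigned int txq_idx)
{
	/* Register the poller before any ring is published anywhere. */
	netif_napi_add(netdev, &nv->napi, example_poll);

	/* Publish the TX submission ring where the xmit path looks it up. */
	nv->qt[0].sub0.q_idx = txq_idx;
	tx_tbl[txq_idx] = &nv->qt[0].sub0;
}
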
1475 int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
1478 unsigned int num_tx = fbn->num_tx_queues;
1479 unsigned int num_rx = fbn->num_rx_queues;
1480 unsigned int num_napi = fbn->num_napi;
1481 struct fbnic_dev *fbd = fbn->fbd;
1487 err = fbnic_alloc_napi_vector(fbd, fbn,
1506 err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
1524 fbnic_free_napi_vectors(fbn);
1544 static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
1547 struct device *dev = fbn->netdev->dev.parent;
1551 size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
1559 txr->size_mask = fbn->txq_size - 1;
1574 static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
1577 struct device *dev = fbn->netdev->dev.parent;
1583 err = fbnic_alloc_tx_ring_desc(fbn, txr);
1601 static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
1604 struct device *dev = fbn->netdev->dev.parent;
1611 rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
1615 rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
1619 rxq_size = fbn->rcq_size;
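
Lines 1551-1559 size the TX descriptor array with array_size()/ALIGN() to a 4096-byte multiple and derive size_mask from the entry count, while lines 1611-1619 scale the host-page and payload-page BDQ entry counts by FBNIC_BD_FRAG_COUNT. A standalone model of the sizing and mask-based index wraparound, assuming (as the mask requires) a power-of-two entry count and an assumed 16-byte descriptor.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t entries = 1024;			/* must be a power of two */
	size_t bytes = ((size_t)entries * 16 + 4095) & ~(size_t)4095;
	uint32_t size_mask = entries - 1;

	assert((entries & (entries - 1)) == 0);

	/* Producer index 1030 wraps to slot 6 without a divide. */
	printf("alloc %zu bytes, index 1030 -> slot %u\n", bytes, 1030U & size_mask);
	return 0;
}
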
1654 static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
1657 struct device *dev = fbn->netdev->dev.parent;
1660 err = fbnic_alloc_rx_ring_desc(fbn, rxr);
1675 static void fbnic_free_qt_resources(struct fbnic_net *fbn,
1678 struct device *dev = fbn->netdev->dev.parent;
1685 static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
1688 struct device *dev = fbn->netdev->dev.parent;
1691 err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
1695 err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
1706 static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
1709 struct device *dev = fbn->netdev->dev.parent;
1712 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
1716 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
1720 err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
1733 static void fbnic_free_nv_resources(struct fbnic_net *fbn,
1740 fbnic_free_qt_resources(fbn, &nv->qt[i]);
1743 fbnic_free_qt_resources(fbn, &nv->qt[i]);
1746 static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
1753 err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
1760 err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
1769 fbnic_free_qt_resources(fbn, &nv->qt[i]);
1773 void fbnic_free_resources(struct fbnic_net *fbn)
1777 for (i = 0; i < fbn->num_napi; i++)
1778 fbnic_free_nv_resources(fbn, fbn->napi[i]);
1781 int fbnic_alloc_resources(struct fbnic_net *fbn)
1785 for (i = 0; i < fbn->num_napi; i++) {
1786 err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
1795 fbnic_free_nv_resources(fbn, fbn->napi[i]);
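
fbnic_alloc_resources() (lines 1785-1795) walks every NAPI vector and, when an allocation fails partway, unwinds only the vectors that were already populated before returning the error. A small standalone model of that allocate-all-or-roll-back loop; the helpers and slot array are invented.

#include <stdlib.h>

static int  vec_alloc(void **slot) { *slot = malloc(64); return *slot ? 0 : -1; }
static void vec_free(void *slot)   { free(slot); }

static int alloc_all(void **vecs, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = vec_alloc(&vecs[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* free only what was allocated */
		vec_free(vecs[i]);
	return err;
}
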
1842 int fbnic_set_netif_queues(struct fbnic_net *fbn)
1846 err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
1847 fbn->num_rx_queues);
1851 for (i = 0; i < fbn->num_napi; i++)
1852 fbnic_set_netif_napi(fbn->napi[i]);
1857 void fbnic_reset_netif_queues(struct fbnic_net *fbn)
1861 for (i = 0; i < fbn->num_napi; i++)
1862 fbnic_reset_netif_napi(fbn->napi[i]);
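
Line 1846 publishes the real TX/RX queue counts to the stack in a single netif_set_real_num_queues() call, and the per-vector NAPI association at line 1852 only runs once that call has succeeded. A hedged sketch of the same ordering; the function name and surrounding context are invented.

#include <linux/netdevice.h>

static int example_publish_queues(struct net_device *netdev,
				  unsigned int num_tx, unsigned int num_rx)
{
	int err;

	/* Tell the stack how many queues are usable before wiring up NAPI. */
	err = netif_set_real_num_queues(netdev, num_tx, num_rx);
	if (err)
		return err;

	/* ...per-vector NAPI/queue association would follow here... */
	return 0;
}
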
1895 void fbnic_napi_disable(struct fbnic_net *fbn)
1899 for (i = 0; i < fbn->num_napi; i++) {
1900 napi_disable(&fbn->napi[i]->napi);
1902 fbnic_nv_irq_disable(fbn->napi[i]);
1906 void fbnic_disable(struct fbnic_net *fbn)
1908 struct fbnic_dev *fbd = fbn->fbd;
1911 for (i = 0; i < fbn->num_napi; i++) {
1912 struct fbnic_napi_vector *nv = fbn->napi[i];
2018 void fbnic_flush(struct fbnic_net *fbn)
2022 for (i = 0; i < fbn->num_napi; i++) {
2023 struct fbnic_napi_vector *nv = fbn->napi[i];
2064 void fbnic_fill(struct fbnic_net *fbn)
2068 for (i = 0; i < fbn->num_napi; i++) {
2069 struct fbnic_napi_vector *nv = fbn->napi[i];
2204 struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
2208 val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
2210 val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
2218 struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
2225 fbn->rx_max_frames *
2233 struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
2261 fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
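
Lines 2208-2210 pack the per-vector rx_usecs/tx_usecs settings into the interrupt rearm register with FIELD_PREP, and lines 2225 and 2261 scale rx_max_frames into a completion-count threshold. A standalone model of FIELD_PREP-style packing of a timeout and a frame threshold into one 32-bit register value; the mask layout is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Invented register layout: bits 13:0 timeout, bits 29:16 frame threshold. */
#define RCQ_TIMEOUT_MASK	0x00003fffU
#define RCQ_THRESH_MASK		0x3fff0000U

/* Minimal FIELD_PREP(): shift the value up to the mask's lowest set bit. */
#define FIELD_PREP_SIM(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t rearm = FIELD_PREP_SIM(RCQ_TIMEOUT_MASK, 120) |	/* usecs  */
			 FIELD_PREP_SIM(RCQ_THRESH_MASK, 64);		/* frames */

	printf("rearm = 0x%08x\n", rearm);
	return 0;
}
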
2269 void fbnic_enable(struct fbnic_net *fbn)
2271 struct fbnic_dev *fbd = fbn->fbd;
2274 for (i = 0; i < fbn->num_napi; i++) {
2275 struct fbnic_napi_vector *nv = fbn->napi[i];
2304 void fbnic_napi_enable(struct fbnic_net *fbn)
2307 struct fbnic_dev *fbd = fbn->fbd;
2310 for (i = 0; i < fbn->num_napi; i++) {
2311 struct fbnic_napi_vector *nv = fbn->napi[i];
2338 struct fbnic_net *fbn = netdev_priv(netdev);
2340 struct fbnic_dev *fbd = fbn->fbd;
2343 for (i = 0; i < fbn->num_napi; i++) {
2344 struct fbnic_napi_vector *nv = fbn->napi[i];