Lines matching refs: tx_ring

742 static int fm10k_tso(struct fm10k_ring *tx_ring,
775 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
782 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
784 netdev_err(tx_ring->netdev,
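
The fm10k_tso() fragments (742-784) include both the normal path, which fills the context descriptor at next_to_use, and the fallback taken when segmentation is requested for a tunnel format the hardware cannot handle. A rough sketch of that fallback, assembled from lines 782-784 (the label, the rate limiting, and the message text are assumptions, not taken from the listing):

err_vxlan:
	/* hardware cannot segment this encapsulation: stop advertising
	 * UDP tunnel GSO so the stack falls back to software segmentation
	 */
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
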
789 static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
811 dev_warn(tx_ring->dev,
813 tx_ring->tx_stats.csum_err++;
854 dev_warn(tx_ring->dev,
859 tx_ring->tx_stats.csum_err++;
865 tx_ring->tx_stats.csum_good++;
869 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
891 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
905 return i == tx_ring->count;
908 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
910 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
916 if (likely(fm10k_desc_unused(tx_ring) < size))
920 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
921 ++tx_ring->tx_stats.restart_queue;
925 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
927 if (likely(fm10k_desc_unused(tx_ring) >= size))
929 return __fm10k_maybe_stop_tx(tx_ring, size);
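
Lines 908-929 are the usual two-stage Tx flow control: an inline fast path that does nothing while descriptors are plentiful, and a slow path that stops the subqueue, re-checks, and immediately restarts it if a completion freed space in the meantime. A sketch assuming the conventional pattern from Intel Ethernet drivers (the memory barrier and the return values are assumptions; the rest follows the fragments):

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* ensure the stopped state is visible before re-reading the ring
	 * state (assumption: barrier as in similar drivers)
	 */
	smp_mb();

	/* still short on descriptors: leave the queue stopped */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* a completion freed space while we were stopping: restart */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	/* fast path: plenty of room, avoid the stop/restart dance */
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;

	return __fm10k_maybe_stop_tx(tx_ring, size);
}
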
932 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
943 u16 i = tx_ring->next_to_use;
946 tx_desc = FM10K_TX_DESC(tx_ring, i);
957 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
963 if (dma_mapping_error(tx_ring->dev, dma))
971 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
973 tx_desc = FM10K_TX_DESC(tx_ring, 0);
984 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
986 tx_desc = FM10K_TX_DESC(tx_ring, 0);
993 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
996 tx_buffer = &tx_ring->tx_buffer[i];
1002 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
1006 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1023 tx_ring->next_to_use = i;
1026 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1029 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1030 writel(i, tx_ring->tail);
1035 dev_err(tx_ring->dev, "TX DMA map failed\n");
1039 tx_buffer = &tx_ring->tx_buffer[i];
1040 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1044 i = tx_ring->count;
1048 tx_ring->next_to_use = i;
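
fm10k_tx_map() (932-1048) DMA-maps the linear data and each fragment, emits one descriptor per chunk, and defers the tail write until the stack stops batching. The repetition at 971-1002 is the ring-wrap handling: fm10k_tx_desc_push() reports (line 905) when the index reaches tx_ring->count, and the caller resets both the descriptor pointer and the index. A condensed sketch of the head-mapping loop and the final doorbell, under the assumption that FM10K_MAX_DATA_PER_TXD caps each descriptor's length:

	/* map the linear portion of the skb */
	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* one descriptor per FM10K_MAX_DATA_PER_TXD chunk; when the push
	 * helper reports that the index hit tx_ring->count, wrap both the
	 * descriptor pointer and the index back to slot 0
	 */
	while (size > FM10K_MAX_DATA_PER_TXD) {
		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
				       FM10K_MAX_DATA_PER_TXD, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}
		dma += FM10K_MAX_DATA_PER_TXD;
		size -= FM10K_MAX_DATA_PER_TXD;
	}

	/* ... fragments are mapped with skb_frag_dma_map() and pushed the
	 * same way (lines 993-1002) ...
	 */

	/* publish the producer index; only ring the doorbell when the
	 * queue is stopped or the stack has no more frames to batch
	 */
	tx_ring->next_to_use = i;
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);
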
1052 struct fm10k_ring *tx_ring)
1071 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1072 tx_ring->tx_stats.tx_busy++;
1077 first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1085 tso = fm10k_tso(tx_ring, first);
1089 fm10k_tx_csum(tx_ring, first);
1091 fm10k_tx_map(tx_ring, first);
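
Lines 1052-1091 outline fm10k_xmit_frame_ring(): reserve enough descriptors up front (count + 3; the reason for the extra headroom is not visible in the listing), record the first tx_buffer for the packet, then run TSO, checksum offload, and mapping in that order. A condensed sketch of that control flow (the tso return-value convention and the drop path are assumptions):

static netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
					 struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	u16 count;
	int tso;

	/* ... count = descriptors needed for the head plus each fragment ... */

	/* reserve descriptors; if the ring is too full, count the event
	 * and let the stack requeue the skb
	 */
	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* remember where this packet starts so completion can find it */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;

	/* offloads: TSO if the skb is GSO, otherwise checksum offload */
	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
	return NETDEV_TX_OK;
}
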
1129 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1131 u32 tx_done = fm10k_get_tx_completed(tx_ring);
1132 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1133 u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
1135 clear_check_for_tx_hang(tx_ring);
1147 tx_ring->tx_stats.tx_done_old = tx_done;
1149 clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
1155 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
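
fm10k_check_tx_hang() (1129-1155) compares the completed count against the value cached in tx_done_old and only reports a hang when there has been no progress across two consecutive checks with work still pending. The conditional itself is not in the listing; the sketch below fills it in as an assumption consistent with the stats update and the ARMED bit handling:

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);

	clear_check_for_tx_hang(tx_ring);

	/* progress since the last check, or nothing pending: not hung */
	if (!tx_pending || (tx_done_old != tx_done)) {
		tx_ring->tx_stats.tx_done_old = tx_done;
		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
		return false;
	}

	/* require two failed checks in a row before declaring a hang */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
}
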
1175 * @tx_ring: tx ring to clean
1179 struct fm10k_ring *tx_ring, int napi_budget)
1186 unsigned int i = tx_ring->next_to_clean;
1191 tx_buffer = &tx_ring->tx_buffer[i];
1192 tx_desc = FM10K_TX_DESC(tx_ring, i);
1193 i -= tx_ring->count;
1220 dma_unmap_single(tx_ring->dev,
1235 i -= tx_ring->count;
1236 tx_buffer = tx_ring->tx_buffer;
1237 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1242 dma_unmap_page(tx_ring->dev,
1255 i -= tx_ring->count;
1256 tx_buffer = tx_ring->tx_buffer;
1257 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1267 i += tx_ring->count;
1268 tx_ring->next_to_clean = i;
1269 u64_stats_update_begin(&tx_ring->syncp);
1270 tx_ring->stats.bytes += total_bytes;
1271 tx_ring->stats.packets += total_packets;
1272 u64_stats_update_end(&tx_ring->syncp);
1276 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1280 netif_err(interface, drv, tx_ring->netdev,
1286 tx_ring->queue_index,
1287 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1288 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1289 tx_ring->next_to_use, i);
1291 netif_stop_subqueue(tx_ring->netdev,
1292 tx_ring->queue_index);
1294 netif_info(interface, probe, tx_ring->netdev,
1297 tx_ring->queue_index);
1306 netdev_tx_completed_queue(txring_txq(tx_ring),
1310 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1311 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1316 if (__netif_subqueue_stopped(tx_ring->netdev,
1317 tx_ring->queue_index) &&
1319 netif_wake_subqueue(tx_ring->netdev,
1320 tx_ring->queue_index);
1321 ++tx_ring->tx_stats.restart_queue;
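
The tail of fm10k_clean_tx_irq() (1267-1321) publishes next_to_clean, updates the per-ring byte/packet counters under the u64_stats sync, reports the completed work to byte queue limits, and wakes the subqueue once enough descriptors are free. A sketch of the wake-up logic (TX_WAKE_THRESHOLD's value, the barrier, and the __FM10K_DOWN test are assumptions beyond what the fragments show):

	/* report completed work to byte queue limits */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* make sure a racing stop sees the updated next_to_clean */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}
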
1635 interface->tx_ring[txr_idx] = ring;
1697 interface->tx_ring[ring->queue_index] = NULL;
1876 interface->tx_ring[offset + i]->reg_idx = q_idx;
1877 interface->tx_ring[offset + i]->qos_pc = pc;
1901 interface->tx_ring[i]->reg_idx = i;
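
Lines 1876-1901 are the two queue-to-register mappings: with multiple traffic classes each Tx ring records its priority class in qos_pc and takes a register index derived from it, while the plain case maps ring i straight to register i. A rough sketch of both assignments (the striding of the register index across classes is an assumption; only the assignments themselves appear above):

	/* QoS: stripe rings across the priority classes */
	for (pc = 0, offset = 0; pc < pc_count; pc++, offset += rss_i) {
		u16 q_idx = pc;

		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_count;
		}
	}

	/* no traffic classes: identity mapping */
	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;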