// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"

/* Reset various index of Tx queue data structure. */
static void octep_iq_reset_indices(struct octep_iq *iq)
{
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octep_read_index = 0;
	iq->flush_index = 0;
	iq->pkts_processed = 0;
	iq->pkt_in_done = 0;
}

/**
 * octep_iq_process_completions() - Process Tx queue completions.
 *
 * @iq: Octeon Tx queue data structure.
 * @budget: max number of completions to be processed in one invocation.
 *
 * Unmaps and frees skbs for descriptors the hardware has consumed,
 * updates completion statistics and BQL accounting, and wakes the
 * netdev subqueue if enough ring space has been reclaimed.
 *
 * Return: 0 if all pending completions were processed within @budget,
 *         1 if @budget was exhausted (more completions may remain).
 */
int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
{
	u32 compl_pkts, compl_bytes, compl_sg;
	struct octep_device *oct = iq->octep_dev;
	struct octep_tx_buffer *tx_buffer;
	struct skb_shared_info *shinfo;
	u32 fi = iq->flush_index;
	struct sk_buff *skb;
	u8 frags, i;

	compl_pkts = 0;
	compl_sg = 0;
	compl_bytes = 0;
	/* Snapshot the hardware read index once per invocation; everything
	 * between flush_index and it has been consumed by the device.
	 */
	iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);

	while (likely(budget && (fi != iq->octep_read_index))) {
		tx_buffer = iq->buff_info + fi;
		skb = tx_buffer->skb;

		fi++;
		/* flush index wraps at ring size */
		if (unlikely(fi == iq->max_count))
			fi = 0;
		compl_bytes += skb->len;
		compl_pkts++;
		budget--;

		if (!tx_buffer->gather) {
			/* Linear skb: single DMA mapping to undo. */
			dma_unmap_single(iq->dev, tx_buffer->dma,
					 tx_buffer->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Scatter/Gather */
		shinfo = skb_shinfo(skb);
		frags = shinfo->nr_frags;
		compl_sg++;

		/* Entry 0 of the sglist is the linear part of the skb.
		 * NOTE(review): len[] appears to be indexed in reverse within
		 * each sglist entry (len[3 - (i & 3)]) to match the hardware
		 * length-field byte layout — confirm against octep_main.h.
		 */
		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
				 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);

		i = 1; /* entry 0 is main skb, unmapped above */
		while (frags--) {
			/* 4 dma_ptr/len pairs per sglist entry: entry i >> 2,
			 * slot i & 3.
			 */
			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}

	iq->pkts_processed += compl_pkts;
	iq->stats->instr_completed += compl_pkts;
	iq->stats->bytes_sent += compl_bytes;
	iq->stats->sgentry_sent += compl_sg;
	iq->flush_index = fi;

	/* BQL: report completed work so the stack can pace the queue. */
	netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);

	if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
	    (IQ_INSTR_SPACE(iq) >
	     OCTEP_WAKE_QUEUE_THRESHOLD))
		netif_wake_subqueue(iq->netdev, iq->q_no);
	return !budget;
}

/**
 * octep_iq_free_pending() - Free Tx buffers for pending completions.
 *
 * @iq: Octeon Tx queue data structure.
 *
 * Walks descriptors from flush_index up to host_write_index — i.e. packets
 * posted but not yet completed — unmapping and freeing each skb, then
 * resets the BQL state for the queue. Used on shutdown, without a budget.
 */
static void octep_iq_free_pending(struct octep_iq *iq)
{
	struct octep_tx_buffer *tx_buffer;
	struct skb_shared_info *shinfo;
	u32 fi = iq->flush_index;
	struct sk_buff *skb;
	u8 frags, i;

	while (fi != iq->host_write_index) {
		tx_buffer = iq->buff_info + fi;
		skb = tx_buffer->skb;

		fi++;
		/* flush index wraps at ring size */
		if (unlikely(fi == iq->max_count))
			fi = 0;

		if (!tx_buffer->gather) {
			/* Linear skb: single DMA mapping to undo. */
			dma_unmap_single(iq->dev, tx_buffer->dma,
					 tx_buffer->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Scatter/Gather */
		shinfo = skb_shinfo(skb);
		frags = shinfo->nr_frags;

		dma_unmap_single(iq->dev,
				 tx_buffer->sglist[0].dma_ptr[0],
				 tx_buffer->sglist[0].len[3],
				 DMA_TO_DEVICE);

		i = 1; /* entry 0 is main skb, unmapped above */
		while (frags--) {
			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}

	iq->flush_index = fi;
	netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}

/**
 * octep_clean_iqs() - Clean Tx queues to shutdown the device.
 *
 * @oct: Octeon device private data structure.
153 * 154 * Free the buffers in Tx queue descriptors pending completion and 155 * reset queue indices 156 */ 157 void octep_clean_iqs(struct octep_device *oct) 158 { 159 int i; 160 161 for (i = 0; i < oct->num_iqs; i++) { 162 octep_iq_free_pending(oct->iq[i]); 163 octep_iq_reset_indices(oct->iq[i]); 164 } 165 } 166 167 /** 168 * octep_setup_iq() - Setup a Tx queue. 169 * 170 * @oct: Octeon device private data structure. 171 * @q_no: Tx queue number to be setup. 172 * 173 * Allocate resources for a Tx queue. 174 */ 175 static int octep_setup_iq(struct octep_device *oct, int q_no) 176 { 177 u32 desc_ring_size, buff_info_size, sglist_size; 178 struct octep_iq *iq; 179 int i; 180 181 iq = vzalloc(sizeof(*iq)); 182 if (!iq) 183 goto iq_alloc_err; 184 oct->iq[q_no] = iq; 185 186 iq->octep_dev = oct; 187 iq->netdev = oct->netdev; 188 iq->dev = &oct->pdev->dev; 189 iq->q_no = q_no; 190 iq->stats = &oct->stats_iq[q_no]; 191 iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); 192 iq->ring_size_mask = iq->max_count - 1; 193 iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); 194 iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); 195 196 /* Allocate memory for hardware queue descriptors */ 197 desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 198 iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, 199 &iq->desc_ring_dma, GFP_KERNEL); 200 if (unlikely(!iq->desc_ring)) { 201 dev_err(iq->dev, 202 "Failed to allocate DMA memory for IQ-%d\n", q_no); 203 goto desc_dma_alloc_err; 204 } 205 206 /* Allocate memory for hardware SGLIST descriptors */ 207 sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * 208 CFG_GET_IQ_NUM_DESC(oct->conf); 209 iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, 210 &iq->sglist_dma, GFP_KERNEL); 211 if (unlikely(!iq->sglist)) { 212 dev_err(iq->dev, 213 "Failed to allocate DMA memory for IQ-%d SGLIST\n", 214 q_no); 215 goto sglist_alloc_err; 216 } 217 218 /* allocate memory to manage Tx packets pending completion */ 219 buff_info_size = 
OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; 220 iq->buff_info = vzalloc(buff_info_size); 221 if (!iq->buff_info) { 222 dev_err(iq->dev, 223 "Failed to allocate buff info for IQ-%d\n", q_no); 224 goto buff_info_err; 225 } 226 227 /* Setup sglist addresses in tx_buffer entries */ 228 for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { 229 struct octep_tx_buffer *tx_buffer; 230 231 tx_buffer = &iq->buff_info[i]; 232 tx_buffer->sglist = 233 &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; 234 tx_buffer->sglist_dma = 235 iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); 236 } 237 238 octep_iq_reset_indices(iq); 239 oct->hw_ops.setup_iq_regs(oct, q_no); 240 241 oct->num_iqs++; 242 return 0; 243 244 buff_info_err: 245 dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); 246 sglist_alloc_err: 247 dma_free_coherent(iq->dev, desc_ring_size, 248 iq->desc_ring, iq->desc_ring_dma); 249 desc_dma_alloc_err: 250 vfree(iq); 251 oct->iq[q_no] = NULL; 252 iq_alloc_err: 253 return -1; 254 } 255 256 /** 257 * octep_free_iq() - Free Tx queue resources. 258 * 259 * @iq: Octeon Tx queue data structure. 260 * 261 * Free all the resources allocated for a Tx queue. 262 */ 263 static void octep_free_iq(struct octep_iq *iq) 264 { 265 struct octep_device *oct = iq->octep_dev; 266 u64 desc_ring_size, sglist_size; 267 int q_no = iq->q_no; 268 269 desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); 270 271 vfree(iq->buff_info); 272 273 if (iq->desc_ring) 274 dma_free_coherent(iq->dev, desc_ring_size, 275 iq->desc_ring, iq->desc_ring_dma); 276 277 sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * 278 CFG_GET_IQ_NUM_DESC(oct->conf); 279 if (iq->sglist) 280 dma_free_coherent(iq->dev, sglist_size, 281 iq->sglist, iq->sglist_dma); 282 283 vfree(iq); 284 oct->iq[q_no] = NULL; 285 oct->num_iqs--; 286 } 287 288 /** 289 * octep_setup_iqs() - setup resources for all Tx queues. 290 * 291 * @oct: Octeon device private data structure. 
292 */ 293 int octep_setup_iqs(struct octep_device *oct) 294 { 295 int i; 296 297 oct->num_iqs = 0; 298 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 299 if (octep_setup_iq(oct, i)) { 300 dev_err(&oct->pdev->dev, 301 "Failed to setup IQ(TxQ)-%d.\n", i); 302 goto iq_setup_err; 303 } 304 dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); 305 } 306 307 return 0; 308 309 iq_setup_err: 310 while (i) { 311 i--; 312 octep_free_iq(oct->iq[i]); 313 } 314 return -1; 315 } 316 317 /** 318 * octep_free_iqs() - Free resources of all Tx queues. 319 * 320 * @oct: Octeon device private data structure. 321 */ 322 void octep_free_iqs(struct octep_device *oct) 323 { 324 int i; 325 326 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 327 octep_free_iq(oct->iq[i]); 328 dev_dbg(&oct->pdev->dev, 329 "Successfully destroyed IQ(TxQ)-%d.\n", i); 330 } 331 oct->num_iqs = 0; 332 } 333