Lines Matching +full:queue +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
17 oq->host_read_idx = 0; in octep_oq_reset_indices()
18 oq->host_refill_idx = 0; in octep_oq_reset_indices()
19 oq->refill_count = 0; in octep_oq_reset_indices()
20 oq->last_pkt_count = 0; in octep_oq_reset_indices()
21 oq->pkts_pending = 0; in octep_oq_reset_indices()
25 * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
27 * @oq: Octeon Rx queue data structure.
30 * -1, if a buffer could not be allocated or mapped for DMA.
34 struct octep_oq_desc_hw *desc_ring = oq->desc_ring; in octep_oq_fill_ring_buffers()
38 for (i = 0; i < oq->max_count; i++) { in octep_oq_fill_ring_buffers()
41 dev_err(oq->dev, "Rx buffer alloc failed\n"); in octep_oq_fill_ring_buffers()
44 desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, in octep_oq_fill_ring_buffers()
47 if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { in octep_oq_fill_ring_buffers()
48 dev_err(oq->dev, in octep_oq_fill_ring_buffers()
49 "OQ-%d buffer alloc: DMA mapping error!\n", in octep_oq_fill_ring_buffers()
50 oq->q_no); in octep_oq_fill_ring_buffers()
54 oq->buff_info[i].page = page; in octep_oq_fill_ring_buffers()
62 i--; in octep_oq_fill_ring_buffers()
63 dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); in octep_oq_fill_ring_buffers()
64 put_page(oq->buff_info[i].page); in octep_oq_fill_ring_buffers()
65 oq->buff_info[i].page = NULL; in octep_oq_fill_ring_buffers()
68 return -1; in octep_oq_fill_ring_buffers()
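
The fill loop above allocates one page per descriptor, maps it for device-to-host DMA, and, if an allocation or mapping fails part-way, unwinds every buffer mapped so far. A minimal sketch of that allocate/map/unwind pattern follows; struct demo_oq and its companion types are simplified stand-ins for the driver's octep_oq structures (the full layouts are not visible in these excerpts), and the sketch assumes a kernel build environment.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>

struct demo_oq_desc {
	dma_addr_t buffer_ptr;		/* DMA address handed to hardware */
};

struct demo_oq_buf {
	struct page *page;		/* host-side record of the Rx buffer */
	u32 len;
};

struct demo_oq {
	struct device *dev;
	struct demo_oq_desc *desc_ring;	/* coherent DMA descriptor ring */
	dma_addr_t desc_ring_dma;
	struct demo_oq_buf *buff_info;	/* per-descriptor host metadata */
	u32 max_count;			/* number of descriptors */
	u32 buffer_size;
	u32 max_single_buffer_size;
	u32 host_read_idx;		/* next descriptor to consume */
	u32 host_refill_idx;		/* next descriptor to refill */
	u32 refill_count;		/* consumed but not yet refilled */
	u32 refill_threshold;
	u32 pkts_pending;
	u32 last_pkt_count;
	u8 __iomem *pkts_sent_reg;	/* hardware packet counter */
	u8 __iomem *pkts_credit_reg;	/* hardware doorbell register */
};

static int demo_fill_ring(struct demo_oq *oq)
{
	u32 i;

	for (i = 0; i < oq->max_count; i++) {
		struct page *page = dev_alloc_page();

		if (!page)
			goto rollback;

		/* Map the whole page for device-to-host (Rx) DMA. */
		oq->desc_ring[i].buffer_ptr =
			dma_map_page(oq->dev, page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(oq->dev, oq->desc_ring[i].buffer_ptr)) {
			put_page(page);
			goto rollback;
		}
		oq->buff_info[i].page = page;
	}
	return 0;

rollback:
	/* Undo every buffer mapped so far, as the error path above does. */
	while (i--) {
		dma_unmap_page(oq->dev, oq->desc_ring[i].buffer_ptr,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		put_page(oq->buff_info[i].page);
		oq->buff_info[i].page = NULL;
	}
	return -1;
}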
72 * octep_oq_refill() - refill buffers for used Rx ring descriptors.
75 * @oq: Octeon Rx queue data structure.
81 struct octep_oq_desc_hw *desc_ring = oq->desc_ring; in octep_oq_refill()
85 refill_idx = oq->host_refill_idx; in octep_oq_refill()
86 for (i = 0; i < oq->refill_count; i++) { in octep_oq_refill()
89 dev_err(oq->dev, "refill: rx buffer alloc failed\n"); in octep_oq_refill()
90 oq->stats->alloc_failures++; in octep_oq_refill()
94 desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, in octep_oq_refill()
96 if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { in octep_oq_refill()
97 dev_err(oq->dev, in octep_oq_refill()
98 "OQ-%d buffer refill: DMA mapping error!\n", in octep_oq_refill()
99 oq->q_no); in octep_oq_refill()
101 oq->stats->alloc_failures++; in octep_oq_refill()
104 oq->buff_info[refill_idx].page = page; in octep_oq_refill()
106 if (refill_idx == oq->max_count) in octep_oq_refill()
109 oq->host_refill_idx = refill_idx; in octep_oq_refill()
110 oq->refill_count -= i; in octep_oq_refill()
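
The refill path keeps two counters: host_refill_idx wraps back to zero when it reaches max_count, and refill_count is reduced only by the number of buffers actually replaced, so whatever remains after an allocation or mapping failure is retried on a later pass. A compact sketch of that accounting, reusing the demo_oq stand-in from the first sketch (demo_alloc_and_map() is a hypothetical helper standing for the page allocation and DMA mapping step):

static int demo_alloc_and_map(struct demo_oq *oq, u32 idx);	/* as in demo_fill_ring() */

static u32 demo_refill(struct demo_oq *oq)
{
	u32 refill_idx = oq->host_refill_idx;
	u32 i;

	for (i = 0; i < oq->refill_count; i++) {
		/* On allocation or mapping failure, stop and leave the
		 * remaining descriptors to be retried on the next pass.
		 */
		if (demo_alloc_and_map(oq, refill_idx))
			break;

		if (++refill_idx == oq->max_count)
			refill_idx = 0;		/* ring wrap-around */
	}

	oq->host_refill_idx = refill_idx;
	oq->refill_count -= i;			/* only what was refilled */
	return i;				/* descriptors made available */
}

Decrementing refill_count by i rather than clearing it is what lets a later pass finish the job once memory pressure eases, with the failure recorded in the queue's alloc_failures statistic as shown above.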
116 * octep_setup_oq() - Set up a Rx queue.
119 * @q_no: Rx queue number to be set up.
121 * Allocate resources for a Rx queue.
131 oct->oq[q_no] = oq; in octep_setup_oq()
133 oq->octep_dev = oct; in octep_setup_oq()
134 oq->netdev = oct->netdev; in octep_setup_oq()
135 oq->dev = &oct->pdev->dev; in octep_setup_oq()
136 oq->q_no = q_no; in octep_setup_oq()
137 oq->stats = &oct->stats_oq[q_no]; in octep_setup_oq()
138 oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); in octep_setup_oq()
139 oq->ring_size_mask = oq->max_count - 1; in octep_setup_oq()
140 oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); in octep_setup_oq()
141 oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE; in octep_setup_oq()
144 * additional header is filled in by Octeon after the length field in in octep_setup_oq()
145 * Rx packets. This header contains additional packet information. in octep_setup_oq()
147 if (oct->conf->fw_info.rx_ol_flags) in octep_setup_oq()
148 oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE; in octep_setup_oq()
150 oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); in octep_setup_oq()
152 desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE; in octep_setup_oq()
153 oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, in octep_setup_oq()
154 &oq->desc_ring_dma, GFP_KERNEL); in octep_setup_oq()
156 if (unlikely(!oq->desc_ring)) { in octep_setup_oq()
157 dev_err(oq->dev, in octep_setup_oq()
158 "Failed to allocate DMA memory for OQ-%d !!\n", q_no); in octep_setup_oq()
162 oq->buff_info = vcalloc(oq->max_count, OCTEP_OQ_RECVBUF_SIZE); in octep_setup_oq()
163 if (unlikely(!oq->buff_info)) { in octep_setup_oq()
164 dev_err(&oct->pdev->dev, in octep_setup_oq()
165 "Failed to allocate buffer info for OQ-%d\n", q_no); in octep_setup_oq()
173 oct->hw_ops.setup_oq_regs(oct, q_no); in octep_setup_oq()
174 oct->num_oqs++; in octep_setup_oq()
179 vfree(oq->buff_info); in octep_setup_oq()
180 oq->buff_info = NULL; in octep_setup_oq()
182 dma_free_coherent(oq->dev, desc_ring_size, in octep_setup_oq()
183 oq->desc_ring, oq->desc_ring_dma); in octep_setup_oq()
184 oq->desc_ring = NULL; in octep_setup_oq()
187 oct->oq[q_no] = NULL; in octep_setup_oq()
189 return -1; in octep_setup_oq()
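
Setup allocates the descriptor ring from coherent DMA memory and a separate vcalloc()'d buff_info array, and its error path releases them in reverse order so a failed queue is never left half-initialised. A reduced sketch of that shape on the demo_oq stand-in (demo_setup_oq() and DEMO_DESC_SIZE are illustrative names, not the driver's):

#include <linux/vmalloc.h>

#define DEMO_DESC_SIZE	sizeof(struct demo_oq_desc)	/* placeholder size */

static int demo_setup_oq(struct device *dev, struct demo_oq *oq, u32 num_desc)
{
	size_t ring_bytes = num_desc * DEMO_DESC_SIZE;

	oq->dev = dev;
	oq->max_count = num_desc;

	oq->desc_ring = dma_alloc_coherent(dev, ring_bytes,
					   &oq->desc_ring_dma, GFP_KERNEL);
	if (!oq->desc_ring)
		return -1;

	oq->buff_info = vcalloc(num_desc, sizeof(*oq->buff_info));
	if (!oq->buff_info)
		goto free_ring;

	if (demo_fill_ring(oq))			/* initial Rx buffers */
		goto free_buff_info;

	return 0;

free_buff_info:
	vfree(oq->buff_info);
	oq->buff_info = NULL;
free_ring:
	/* Release in reverse order of allocation. */
	dma_free_coherent(dev, ring_bytes, oq->desc_ring, oq->desc_ring_dma);
	oq->desc_ring = NULL;
	return -1;
}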
193 * octep_oq_free_ring_buffers() - Free ring buffers.
195 * @oq: Octeon Rx queue data structure.
197 * Free receive buffers in unused Rx queue descriptors.
201 struct octep_oq_desc_hw *desc_ring = oq->desc_ring; in octep_oq_free_ring_buffers()
204 if (!oq->desc_ring || !oq->buff_info) in octep_oq_free_ring_buffers()
207 for (i = 0; i < oq->max_count; i++) { in octep_oq_free_ring_buffers()
208 if (oq->buff_info[i].page) { in octep_oq_free_ring_buffers()
209 dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, in octep_oq_free_ring_buffers()
211 put_page(oq->buff_info[i].page); in octep_oq_free_ring_buffers()
212 oq->buff_info[i].page = NULL; in octep_oq_free_ring_buffers()
220 * octep_free_oq() - Free Rx queue resources.
222 * @oq: Octeon Rx queue data structure.
224 * Free all resources of a Rx queue.
228 struct octep_device *oct = oq->octep_dev; in octep_free_oq()
229 int q_no = oq->q_no; in octep_free_oq()
233 vfree(oq->buff_info); in octep_free_oq()
235 if (oq->desc_ring) in octep_free_oq()
236 dma_free_coherent(oq->dev, in octep_free_oq()
237 oq->max_count * OCTEP_OQ_DESC_SIZE, in octep_free_oq()
238 oq->desc_ring, oq->desc_ring_dma); in octep_free_oq()
241 oct->oq[q_no] = NULL; in octep_free_oq()
242 oct->num_oqs--; in octep_free_oq()
247 * octep_setup_oqs() - set up resources for all Rx queues.
255 oct->num_oqs = 0; in octep_setup_oqs()
256 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { in octep_setup_oqs()
259 dev_err(&oct->pdev->dev, in octep_setup_oqs()
260 "Failed to setup OQ(RxQ)-%d.\n", i); in octep_setup_oqs()
263 dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); in octep_setup_oqs()
270 i--; in octep_setup_oqs()
271 octep_free_oq(oct->oq[i]); in octep_setup_oqs()
273 return -1; in octep_setup_oqs()
277 * octep_oq_dbell_init() - Initialize Rx queue doorbell.
281 * Write the number of descriptors to the Rx queue doorbell register.
287 for (i = 0; i < oct->num_oqs; i++) in octep_oq_dbell_init()
288 writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); in octep_oq_dbell_init()
292 * octep_free_oqs() - Free resources of all Rx queues.
300 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { in octep_free_oqs()
301 if (!oct->oq[i]) in octep_free_oqs()
303 octep_free_oq(oct->oq[i]); in octep_free_oqs()
304 dev_dbg(&oct->pdev->dev, in octep_free_oqs()
305 "Successfully freed OQ(RxQ)-%d.\n", i); in octep_free_oqs()
310 * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
313 * @oq: Octeon Rx queue data structure.
322 pkt_count = readl(oq->pkts_sent_reg); in octep_oq_check_hw_for_pkts()
323 new_pkts = pkt_count - oq->last_pkt_count; in octep_oq_check_hw_for_pkts()
325 /* Clear the hardware packet counter register if the Rx queue is in octep_oq_check_hw_for_pkts()
326 * being processed continuously within a single interrupt and in octep_oq_check_hw_for_pkts()
331 writel(pkt_count, oq->pkts_sent_reg); in octep_oq_check_hw_for_pkts()
332 pkt_count = readl(oq->pkts_sent_reg); in octep_oq_check_hw_for_pkts()
335 oq->last_pkt_count = pkt_count; in octep_oq_check_hw_for_pkts()
336 oq->pkts_pending += new_pkts; in octep_oq_check_hw_for_pkts()
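
The check-for-packets helper derives the number of new packets from the difference between the hardware counter and the value last seen by the host, and occasionally writes the counter back to clear it so it cannot wrap while the queue is polled back-to-back within one interrupt. The exact clear condition is not visible in the excerpt; the threshold below is a placeholder:

#define DEMO_PKT_COUNT_CLEAR_THRESHOLD	(1U << 30)	/* placeholder */

static u32 demo_check_hw_for_pkts(struct demo_oq *oq)
{
	u32 pkt_count, new_pkts;

	pkt_count = readl(oq->pkts_sent_reg);
	new_pkts = pkt_count - oq->last_pkt_count;

	/* Occasionally write the counter back to clear it (register
	 * semantics assumed from the excerpt), then re-read it, so the
	 * counter cannot overflow during continuous polling.
	 */
	if (pkt_count > DEMO_PKT_COUNT_CLEAR_THRESHOLD) {
		writel(pkt_count, oq->pkts_sent_reg);
		pkt_count = readl(oq->pkts_sent_reg);
	}

	oq->last_pkt_count = pkt_count;
	oq->pkts_pending += new_pkts;
	return new_pkts;
}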
341 * octep_oq_next_pkt() - Move to the next packet in Rx queue.
343 * @oq: Octeon Rx queue data structure.
355 dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr, in octep_oq_next_pkt()
357 buff_info->page = NULL; in octep_oq_next_pkt()
360 if (*read_idx == oq->max_count) in octep_oq_next_pkt()
365 * octep_oq_drop_rx() - Free the resources associated with a packet.
367 * @oq: Octeon Rx queue data structure.
377 int data_len = buff_info->len - oq->max_single_buffer_size; in octep_oq_drop_rx()
381 data_len -= oq->buffer_size; in octep_oq_drop_rx()
386 * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
389 * @oq: Octeon Rx queue data structure.
392 * Process the new packets in the Rx queue.
393 * Packets larger than a single Rx buffer arrive in consecutive descriptors.
402 netdev_features_t feat = oq->netdev->features; in __octep_oq_process_rx()
411 read_idx = oq->host_read_idx; in __octep_oq_process_rx()
415 buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; in __octep_oq_process_rx()
416 resp_hw = page_address(buff_info->page); in __octep_oq_process_rx()
418 /* Swap the big-endian length field to CPU byte order */ in __octep_oq_process_rx()
419 buff_info->len = be64_to_cpu(resp_hw->length); in __octep_oq_process_rx()
420 if (oct->conf->fw_info.rx_ol_flags) { in __octep_oq_process_rx()
426 buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE; in __octep_oq_process_rx()
432 rx_ol_flags = resp_hw_ext->rx_ol_flags; in __octep_oq_process_rx()
435 * Hardware Rx response header. in __octep_oq_process_rx()
447 oq->stats->alloc_failures++; in __octep_oq_process_rx()
452 rx_bytes += buff_info->len; in __octep_oq_process_rx()
454 if (buff_info->len <= oq->max_single_buffer_size) { in __octep_oq_process_rx()
455 skb_put(skb, buff_info->len); in __octep_oq_process_rx()
463 skb_put(skb, oq->max_single_buffer_size); in __octep_oq_process_rx()
465 data_len = buff_info->len - oq->max_single_buffer_size; in __octep_oq_process_rx()
468 &oq->buff_info[read_idx]; in __octep_oq_process_rx()
469 if (data_len < oq->buffer_size) { in __octep_oq_process_rx()
470 buff_info->len = data_len; in __octep_oq_process_rx()
473 buff_info->len = oq->buffer_size; in __octep_oq_process_rx()
474 data_len -= oq->buffer_size; in __octep_oq_process_rx()
477 skb_add_rx_frag(skb, shinfo->nr_frags, in __octep_oq_process_rx()
478 buff_info->page, 0, in __octep_oq_process_rx()
479 buff_info->len, in __octep_oq_process_rx()
480 buff_info->len); in __octep_oq_process_rx()
486 skb->dev = oq->netdev; in __octep_oq_process_rx()
487 skb->protocol = eth_type_trans(skb, skb->dev); in __octep_oq_process_rx()
490 skb->ip_summed = CHECKSUM_UNNECESSARY; in __octep_oq_process_rx()
492 skb->ip_summed = CHECKSUM_NONE; in __octep_oq_process_rx()
493 napi_gro_receive(oq->napi, skb); in __octep_oq_process_rx()
496 oq->host_read_idx = read_idx; in __octep_oq_process_rx()
497 oq->refill_count += desc_used; in __octep_oq_process_rx()
498 oq->stats->packets += pkt; in __octep_oq_process_rx()
499 oq->stats->bytes += rx_bytes; in __octep_oq_process_rx()
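
Packets larger than max_single_buffer_size span several descriptors: the head goes into the skb's linear area and each following descriptor contributes up to buffer_size bytes, attached as page fragments with skb_add_rx_frag(), while read_idx and desc_used advance so the consumed descriptors can be refilled later. A trimmed sketch of that gathering loop; skb construction for the head buffer is elided, and demo_next_pkt() is a hypothetical helper standing for the unmap-and-advance step in octep_oq_next_pkt():

#include <linux/skbuff.h>

/* Stands for octep_oq_next_pkt(): unmap the buffer, clear its buff_info
 * entry and advance *read_idx with wrap-around, counting descriptors used.
 */
static void demo_next_pkt(struct demo_oq *oq, struct demo_oq_buf *buff_info,
			  u32 *read_idx, u32 *desc_used);

static void demo_gather_frags(struct demo_oq *oq, struct sk_buff *skb,
			      u32 pkt_len, u32 *read_idx, u32 *desc_used)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct demo_oq_buf *buff_info;
	u32 data_len;

	/* The head of the packet fills the skb's linear area; the rest
	 * follows in consecutive descriptors, at most buffer_size bytes
	 * per descriptor.
	 */
	skb_put(skb, oq->max_single_buffer_size);
	data_len = pkt_len - oq->max_single_buffer_size;

	while (data_len) {
		buff_info = &oq->buff_info[*read_idx];

		if (data_len < oq->buffer_size) {
			buff_info->len = data_len;
			data_len = 0;
		} else {
			buff_info->len = oq->buffer_size;
			data_len -= oq->buffer_size;
		}

		/* Attach the buffer's page as an skb fragment. */
		skb_add_rx_frag(skb, shinfo->nr_frags, buff_info->page, 0,
				buff_info->len, buff_info->len);

		demo_next_pkt(oq, buff_info, read_idx, desc_used);
	}
}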
505 * octep_oq_process_rx() - Process Rx queue.
507 * @oq: Octeon Rx queue data structure.
518 struct octep_device *oct = oq->octep_dev; in octep_oq_process_rx()
525 if (oq->pkts_pending == 0) in octep_oq_process_rx()
527 pkts_available = min(budget - total_pkts_processed, in octep_oq_process_rx()
528 oq->pkts_pending); in octep_oq_process_rx()
534 oq->pkts_pending -= pkts_processed; in octep_oq_process_rx()
538 if (oq->refill_count >= oq->refill_threshold) { in octep_oq_process_rx()
543 writel(desc_refilled, oq->pkts_credit_reg); in octep_oq_process_rx()
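
Once refill_count crosses refill_threshold after a poll round, the refilled descriptors are credited back to the hardware by writing the count to the queue's doorbell register, the same register octep_oq_dbell_init() primes with max_count at start-up. Roughly, on the stand-in type (the memory barrier is a cautious assumption; the lines between the refill call and the doorbell write are not shown):

static void demo_post_poll_refill(struct demo_oq *oq)
{
	u32 desc_refilled;

	if (oq->refill_count < oq->refill_threshold)
		return;

	desc_refilled = demo_refill(oq);	/* refill sketch above */

	/* Make the refilled descriptors visible to the device before
	 * crediting them through the doorbell register.
	 */
	smp_wmb();
	writel(desc_refilled, oq->pkts_credit_reg);
}

That the poll path writes a delta (desc_refilled) while octep_oq_dbell_init() writes the full max_count suggests the register accumulates credits rather than holding an absolute index, though the excerpt alone does not confirm the hardware semantics.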