Lines Matching +full:rx +full:- +full:queues +full:- +full:to +full:- +full:use
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Copyright (c) 2014-2015 Hisilicon Limited.
11 * a set of queues provided by AE
13 * the channel between the upper layer and the AE; it can do tx and rx
15 * a tx or rx channel within an rbq
68 /* some say the RX and TX RCB formats may diverge in the future, but
79 #define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
209 } rx; member
217 /* priv data for the desc, e.g. skb when used with the IP stack */
224 /* desc type, used by the ring user to mark the type of the priv data */
231 /* hnae_ring->flags fields */
232 #define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
233 #define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
278 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
285 /* idx of the latest sent desc; the ring is empty when equal to
293 /* total rx bytes since the last rx rate calculation */
297 u32 coal_rx_rate; /* rx rate in MB */
301 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
303 ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
311 assert((idx) >= 0 && (idx) < (ring)->desc_num)
321 return (end - begin + ring->desc_num) % ring->desc_num; in ring_dist()
326 return ring->desc_num - in ring_space()
327 ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; in ring_space()
332 assert_is_ring_idx(ring, ring->next_to_use); in is_ring_empty()
333 assert_is_ring_idx(ring, ring->next_to_clean); in is_ring_empty()
335 return ring->next_to_use == ring->next_to_clean; in is_ring_empty()
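
The macros and helpers above implement the usual wrap-around ring-index arithmetic: next_to_use is the producer index, next_to_clean the consumer index, and one slot is deliberately left unused so a full ring can be told apart from an empty one. Below is a minimal, self-contained sketch of the same arithmetic; the demo_ring type and the concrete values are hypothetical, only the math mirrors the fragments above.

#include <assert.h>

struct demo_ring {
	int desc_num;      /* total descriptors in the ring */
	int next_to_use;   /* producer index */
	int next_to_clean; /* consumer index */
};

/* distance from begin to end, accounting for wrap-around */
static int demo_ring_dist(const struct demo_ring *r, int begin, int end)
{
	return (end - begin + r->desc_num) % r->desc_num;
}

/* free slots; one slot stays unused so a full ring differs from an empty one */
static int demo_ring_space(const struct demo_ring *r)
{
	return r->desc_num -
	       demo_ring_dist(r, r->next_to_clean, r->next_to_use) - 1;
}

int main(void)
{
	struct demo_ring r = { .desc_num = 8, .next_to_use = 6, .next_to_clean = 2 };

	assert(demo_ring_dist(&r, 2, 6) == 4);    /* four descriptors in flight */
	assert(demo_ring_space(&r) == 3);         /* 8 - 4 - 1 free slots */
	assert(r.next_to_use != r.next_to_clean); /* hence the ring is not empty */
	return 0;
}
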
338 #define hnae_buf_size(_ring) ((_ring)->buf_size)
355 struct hnae_ae_dev *dev; /* the device that uses this queue */
387 * Get a handle from the AE according to its name and options.
388 * the AE driver should manage the space used by the handle and its queues while
390 * queues.
394 * Enable the hardware, including all queues
398 * Set options for the AE
403 * non-ok
405 * Set the ring irq to be enabled (0) or disabled (1)
407 * Set the queue to be enabled (1) or disabled (0); this will not change the
416 * get the tx and rx pause frame settings
418 * set pause frame autonegotiation
420 * get pause frame autonegotiation
422 * set the tx and rx pause frame settings
424 * get usecs to delay a TX interrupt after a packet is sent
426 * get the maximum number of packets to be sent before a TX interrupt.
428 * set usecs to delay a TX interrupt after a packet is sent
430 * set the maximum number of packets to be sent before a TX interrupt.
432 * get the number of RX/TX rings
434 * get the maximum number of RX/TX rings
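
The coalescing entries above describe two knobs that bound when a TX interrupt fires: a time budget (usecs to delay the interrupt after a packet is sent) and a packet budget (maximum frames before an interrupt). A common reading is that whichever threshold is reached first raises the interrupt; the self-contained model below assumes that, and its names and numbers are illustrative rather than taken from the driver.

#include <stdbool.h>
#include <stdio.h>

struct demo_tx_coal {
	unsigned int usecs;      /* delay a TX interrupt this long after a packet */
	unsigned int max_frames; /* or fire once this many packets are pending */
};

static bool demo_tx_irq_due(const struct demo_tx_coal *c,
			    unsigned int pending_pkts,
			    unsigned int usecs_elapsed)
{
	return pending_pkts >= c->max_frames || usecs_elapsed >= c->usecs;
}

int main(void)
{
	struct demo_tx_coal c = { .usecs = 50, .max_frames = 16 };

	printf("%d\n", demo_tx_irq_due(&c, 16, 10)); /* 1: packet budget reached */
	printf("%d\n", demo_tx_irq_due(&c, 4, 60));  /* 1: timer expired */
	printf("%d\n", demo_tx_irq_due(&c, 4, 10));  /* 0: keep coalescing */
	return 0;
}
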
545 spinlock_t lock; /* lock to protect the handle_list */
549 struct device *owner_dev; /* the device that makes use of this handle */
565 struct list_head node; /* node in hnae_ae_dev->handle_list */
567 struct hnae_queue **qs; /* array base of all queues */
570 #define ring_to_dev(ring) ((ring)->q->dev->dev)
586 (q)->tx_ring.io_base + RCB_REG_TAIL)
595 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_reserve_buffer_map()
598 ret = bops->alloc_buffer(ring, cb); in hnae_reserve_buffer_map()
602 ret = bops->map_buffer(ring, cb); in hnae_reserve_buffer_map()
609 bops->free_buffer(ring, cb); in hnae_reserve_buffer_map()
616 int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]); in hnae_alloc_buffer_attach()
621 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); in hnae_alloc_buffer_attach()
628 ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]); in hnae_buffer_detach()
629 ring->desc[i].addr = 0; in hnae_buffer_detach()
634 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_free_buffer_detach()
635 struct hnae_desc_cb *cb = &ring->desc_cb[i]; in hnae_free_buffer_detach()
637 if (!ring->desc_cb[i].dma) in hnae_free_buffer_detach()
641 bops->free_buffer(ring, cb); in hnae_free_buffer_detach()
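
Taken together, the helpers above split descriptor setup into reserve (alloc_buffer + map_buffer), attach (write the DMA address into the descriptor) and the matching detach/free steps. The sketch below shows one way a ring could be populated with rollback on failure; it assumes the hnae.h declarations and the signatures implied by the fragments, and demo_fill_ring itself is hypothetical, not the driver's actual init path.

/* hypothetical helper, assumes the hnae.h declarations above */
static int demo_fill_ring(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		/* alloc_buffer + map_buffer, then desc[i].addr = dma */
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j); /* unmap, free, clear addr */
	return ret;
}
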
644 /* detach an in-use buffer and replace it with a reserved one */
648 struct hnae_buf_ops *bops = ring->q->handle->bops; in hnae_replace_buffer()
650 bops->unmap_buffer(ring, &ring->desc_cb[i]); in hnae_replace_buffer()
651 ring->desc_cb[i] = *res_cb; in hnae_replace_buffer()
652 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); in hnae_replace_buffer()
653 ring->desc[i].rx.ipoff_bnum_pid_flag = 0; in hnae_replace_buffer()
658 ring->desc_cb[i].reuse_flag = 0; in hnae_reuse_buffer()
659 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma in hnae_reuse_buffer()
660 + ring->desc_cb[i].page_offset); in hnae_reuse_buffer()
661 ring->desc[i].rx.ipoff_bnum_pid_flag = 0; in hnae_reuse_buffer()
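
hnae_replace_buffer() and hnae_reuse_buffer() above cover the two RX refill outcomes: swap in a freshly reserved buffer, or re-arm the same page when it can be recycled. The sketch below shows that decision under the assumption of hnae.h context; demo_rx_refill and the reuse_flag policy shown are illustrative, not the driver's actual receive path.

/* hypothetical helper, assumes the hnae.h declarations above */
static int demo_rx_refill(struct hnae_ring *ring, int i)
{
	struct hnae_desc_cb res_cb;
	int ret;

	if (ring->desc_cb[i].reuse_flag) {
		hnae_reuse_buffer(ring, i); /* re-arm the same page in place */
		return 0;
	}

	ret = hnae_reserve_buffer_map(ring, &res_cb); /* alloc + map a new buffer */
	if (ret)
		return ret;

	hnae_replace_buffer(ring, i, &res_cb); /* unmap the old cb, install the new one */
	return 0;
}
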
670 for (i = 0; i < h->q_num; i++) { in hnae_reinit_all_ring_desc()
671 ring = &h->qs[i]->rx_ring; in hnae_reinit_all_ring_desc()
672 for (j = 0; j < ring->desc_num; j++) in hnae_reinit_all_ring_desc()
673 ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma); in hnae_reinit_all_ring_desc()
685 for (i = 0; i < h->q_num; i++) { in hnae_reinit_all_ring_page_off()
686 ring = &h->qs[i]->rx_ring; in hnae_reinit_all_ring_page_off()
687 for (j = 0; j < ring->desc_num; j++) { in hnae_reinit_all_ring_page_off()
688 ring->desc_cb[j].page_offset = 0; in hnae_reinit_all_ring_page_off()
689 if (ring->desc[j].addr != in hnae_reinit_all_ring_page_off()
690 cpu_to_le64(ring->desc_cb[j].dma)) in hnae_reinit_all_ring_page_off()
691 ring->desc[j].addr = in hnae_reinit_all_ring_page_off()
692 cpu_to_le64(ring->desc_cb[j].dma); in hnae_reinit_all_ring_page_off()
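
Both loops above walk every RX ring of a handle and rewrite the descriptor DMA addresses (and page offsets) from the per-descriptor control blocks, which is what makes the rings usable again after buffers have been reused or the hardware has been reset. A purely illustrative usage sketch follows, assuming hnae.h context; the driver's real reset flow may call these helpers differently.

/* hypothetical helper, assumes the hnae.h declarations above */
static void demo_restore_rx_rings(struct hnae_handle *h)
{
	hnae_reinit_all_ring_page_off(h); /* page_offset = 0, re-sync addrs */
	hnae_reinit_all_ring_desc(h);     /* rewrite dma addrs into descriptors */
}
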