Lines matching full:buffer in hbg_txrx.c, the TX/RX path of the Linux hibmcge Ethernet driver (drivers/net/ethernet/hisilicon/hibmcge/). The file line numbers from the search output are kept; gaps in the numbering mark lines that did not match and were elided.

 13 #define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
                                       DMA_FROM_DEVICE : DMA_TO_DEVICE)

 34 static int hbg_dma_map(struct hbg_buffer *buffer)
 36         struct hbg_priv *priv = buffer->priv;
 38         buffer->skb_dma = dma_map_single(&priv->pdev->dev,
 39                                          buffer->skb->data, buffer->skb_len,
 40                                          buffer_to_dma_dir(buffer));
 41         if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
 42                 if (buffer->dir == HBG_DIR_RX)

 53 static void hbg_dma_unmap(struct hbg_buffer *buffer)
 55         struct hbg_priv *priv = buffer->priv;
 57         if (unlikely(!buffer->skb_dma))
 60         dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
 61                          buffer_to_dma_dir(buffer));
 62         buffer->skb_dma = 0;

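The map/unmap pair follows the standard streaming-DMA pattern: the direction comes from the buffer's dir field, the mapping error is checked immediately, and skb_dma doubles as an "is mapped" flag so hbg_dma_unmap() is safe to call on a buffer that was never mapped. A minimal sketch of the completed pair, assuming the error path elided by the search simply returns -ENOMEM:

        /* Sketch of the streaming-DMA pattern above; the hbg_dma_map()
         * error path is not in the matched lines, so the plain -ENOMEM
         * return (line 42 hints at per-direction bookkeeping first) is
         * an assumption, not the driver's verbatim code. */
        #include <linux/dma-mapping.h>

        static int hbg_dma_map(struct hbg_buffer *buffer)
        {
                struct hbg_priv *priv = buffer->priv;

                buffer->skb_dma = dma_map_single(&priv->pdev->dev,
                                                 buffer->skb->data, buffer->skb_len,
                                                 buffer_to_dma_dir(buffer));
                if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
                        return -ENOMEM;                /* assumed error path */

                return 0;
        }

        static void hbg_dma_unmap(struct hbg_buffer *buffer)
        {
                struct hbg_priv *priv = buffer->priv;

                if (unlikely(!buffer->skb_dma))        /* never mapped or already unmapped */
                        return;

                dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
                                 buffer_to_dma_dir(buffer));
                buffer->skb_dma = 0;                   /* mark as unmapped */
        }
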
 65 static void hbg_init_tx_desc(struct hbg_buffer *buffer,
 68         u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
 73         if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
 80                                   buffer->skb->len);
 81         tx_desc->word2 = buffer->skb_dma;
 82         tx_desc->word3 = buffer->state_dma;

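Only fragments of the descriptor setup match, but the shape is clear: word2 carries the packet's DMA address, word3 the DMA address of the buffer's own state word, and checksum offload is requested when the stack left the checksum partial. A hedged sketch of that flow; the word0/word1 layout and every HBG_TXD_* mask below are invented illustrations (FIELD_PREP is the real helper from linux/bitfield.h), and only the ip_offset computation and the word2/word3 stores come from the matched lines:

        /* Hedged sketch; HBG_TXD_* masks and the word0/word1 layout
         * are assumptions. */
        #include <linux/bitfield.h>

        static void hbg_init_tx_desc(struct hbg_buffer *buffer,
                                     struct hbg_tx_desc *tx_desc)
        {
                /* network_header and mac_header are offsets into the skb,
                 * so their difference is the L2 header length, i.e. where
                 * the IP header starts. */
                u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
                u32 word0 = 0;

                if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
                        word0 |= FIELD_PREP(HBG_TXD_IP_OFFSET_M, ip_offset); /* assumed mask */
                        word0 |= FIELD_PREP(HBG_TXD_CSUM_EN_B, 1);           /* assumed: ask HW to checksum */
                }

                tx_desc->word0 = word0;
                tx_desc->word1 = FIELD_PREP(HBG_TXD_LEN_M,                   /* assumed mask */
                                            buffer->skb->len);
                tx_desc->word2 = buffer->skb_dma;   /* packet payload DMA address */
                tx_desc->word3 = buffer->state_dma; /* HW writes TX state back here */
        }
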
In hbg_net_start_xmit():
 93         struct hbg_buffer *buffer;
109         buffer = &ring->queue[ntu];
110         buffer->skb = skb;
111         buffer->skb_len = skb->len;
112         if (unlikely(hbg_dma_map(buffer))) {
117         buffer->state = HBG_TX_STATE_START;
118         hbg_init_tx_desc(buffer, &tx_desc);

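The xmit path claims the slot at ntu ("next to use"), attaches the skb, maps it, marks the buffer state as started, and builds the descriptor. A sketch of the surrounding ring bookkeeping, assuming the common ntu/ntc ring convention; the queue-full test, the drop-on-map-failure path, the descriptor hand-off, and the ntu advance are all assumptions:

        /* Hedged sketch; hbg_queue_is_full(), hbg_hw_set_tx_desc() and
         * the ring field names are assumptions. */
        static netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb,
                                              struct net_device *netdev)
        {
                struct hbg_priv *priv = netdev_priv(netdev);
                struct hbg_ring *ring = &priv->tx_ring;      /* field name assumed */
                struct hbg_tx_desc tx_desc;
                struct hbg_buffer *buffer;
                u32 ntu = ring->ntu;

                if (unlikely(hbg_queue_is_full(ring))) {     /* assumed helper */
                        netif_stop_queue(netdev);
                        return NETDEV_TX_BUSY;
                }

                buffer = &ring->queue[ntu];
                buffer->skb = skb;
                buffer->skb_len = skb->len;
                if (unlikely(hbg_dma_map(buffer))) {
                        dev_kfree_skb_any(skb);              /* assumed: drop on map failure */
                        buffer->skb = NULL;
                        return NETDEV_TX_OK;
                }

                buffer->state = HBG_TX_STATE_START;
                hbg_init_tx_desc(buffer, &tx_desc);
                hbg_hw_set_tx_desc(priv, &tx_desc);          /* assumed: hand desc to HW */

                ring->ntu = (ntu + 1) % ring->len;           /* assumed ring advance */
                return NETDEV_TX_OK;
        }
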
129 static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
131         if (unlikely(!buffer->skb))
134         dev_kfree_skb_any(buffer->skb);
135         buffer->skb = NULL;

138 static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
140         u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
141         struct hbg_priv *priv = buffer->priv;
143         buffer->skb = netdev_alloc_skb(priv->netdev, len);
144         if (unlikely(!buffer->skb))
147         buffer->skb_len = len;
148         memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);

152 static void hbg_buffer_free(struct hbg_buffer *buffer)
154         hbg_dma_unmap(buffer);
155         hbg_buffer_free_skb(buffer);

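These three helpers give each buffer one unconditional teardown path: hbg_buffer_free() unmaps before freeing (the mapping must not outlive the skb memory), and it is safe on a buffer in any state because unmap bails when skb_dma is 0 and free_skb bails when skb is NULL. The allocator sizes the skb to the direction-specific maximum frame length and zeroes the head area that the hardware later fills with its RX descriptor. A sketch of the allocator with the elided control flow restored; the return values are assumptions:

        /* Sketch with the elided returns filled in; the -ENOMEM/0
         * returns are assumptions (the matched lines show only the
         * checks and assignments). */
        static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
        {
                u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
                struct hbg_priv *priv = buffer->priv;

                buffer->skb = netdev_alloc_skb(priv->netdev, len);
                if (unlikely(!buffer->skb))
                        return -ENOMEM;                /* assumed */

                buffer->skb_len = len;

                /* Zero the head area the hardware will overwrite with
                 * its RX descriptor. */
                memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
                return 0;                              /* assumed */
        }
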
In hbg_napi_tx_recycle():
166         struct hbg_buffer *buffer;
182         buffer = &ring->queue[ntc];
183         if (buffer->state != HBG_TX_STATE_COMPLETE)
186         hbg_buffer_free(buffer);

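TX completion walks the ring from ntc ("next to clean") and stops at the first buffer whose state word, written back by the hardware through state_dma, has not yet reached HBG_TX_STATE_COMPLETE. A hedged sketch of that loop; the NAPI plumbing, budget handling, and ntc advance are assumptions:

        /* Hedged sketch; only the state check and the free come from
         * the matched lines, the rest is the usual NAPI recycle shape. */
        static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
        {
                struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); /* assumed */
                struct hbg_buffer *buffer;
                int packet_done = 0;
                u32 ntc = ring->ntc;

                while (packet_done < budget) {
                        buffer = &ring->queue[ntc];
                        if (buffer->state != HBG_TX_STATE_COMPLETE)
                                break;          /* HW has not finished this slot yet */

                        hbg_buffer_free(buffer);
                        ntc = (ntc + 1) % ring->len;   /* assumed advance */
                        packet_done++;
                }

                ring->ntc = ntc;
                if (packet_done < budget)
                        napi_complete_done(napi, packet_done); /* simplified */

                return packet_done;
        }
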
In hbg_rx_fill_one_buffer():
374         struct hbg_buffer *buffer;
380         buffer = &ring->queue[ring->ntu];
381         ret = hbg_buffer_alloc_skb(buffer);
385         ret = hbg_dma_map(buffer);
387                 hbg_buffer_free_skb(buffer);
391         hbg_hw_fill_buffer(priv, buffer->skb_dma);

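RX refill is the same slot-at-ntu pattern as TX, with the cleanup order reversed on failure: if the DMA mapping fails, the just-allocated skb is freed before returning; on success the mapped address is posted to the hardware. A sketch with the elided control flow restored; the early returns and the ntu advance are assumptions:

        /* Sketch; the error returns and ring advance are assumptions. */
        static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
        {
                struct hbg_ring *ring = &priv->rx_ring;      /* field name assumed */
                struct hbg_buffer *buffer;
                int ret;

                buffer = &ring->queue[ring->ntu];
                ret = hbg_buffer_alloc_skb(buffer);
                if (unlikely(ret))
                        return ret;                          /* assumed */

                ret = hbg_dma_map(buffer);
                if (unlikely(ret)) {
                        hbg_buffer_free_skb(buffer);         /* undo the alloc */
                        return ret;                          /* assumed */
                }

                hbg_hw_fill_buffer(priv, buffer->skb_dma);   /* post address to HW */
                ring->ntu = (ring->ntu + 1) % ring->len;     /* assumed advance */
                return 0;
        }
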
In hbg_sync_data_from_hw():
397                                  struct hbg_buffer *buffer)
404         dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
405                                 buffer->skb_len, DMA_FROM_DEVICE);
407         rx_desc = (struct hbg_rx_desc *)buffer->skb->data;

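Because the buffer is still mapped for streaming DMA, the CPU must call dma_sync_single_for_cpu() before peeking at the hardware-written descriptor sitting at the front of skb->data. A hedged sketch; the bool return and the validity helper are assumptions inferred from the caller at line 428:

        /* Hedged sketch; hbg_rx_desc_is_valid() is an assumed helper. */
        static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
                                          struct hbg_buffer *buffer)
        {
                struct hbg_rx_desc *rx_desc;

                /* Give the CPU a coherent view of what the device wrote. */
                dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
                                        buffer->skb_len, DMA_FROM_DEVICE);

                rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
                return hbg_rx_desc_is_valid(rx_desc);        /* assumed */
        }
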
In hbg_napi_rx_poll():
416         struct hbg_buffer *buffer;
424         buffer = &ring->queue[ring->ntc];
425         if (unlikely(!buffer->skb))
428         if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
430         rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
433         if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
434                 hbg_buffer_free(buffer);
438         hbg_dma_unmap(buffer);
439         skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
440         skb_put(buffer->skb, pkt_len);
441         buffer->skb->protocol = eth_type_trans(buffer->skb,
445         napi_gro_receive(napi, buffer->skb);
446         buffer->skb = NULL;

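Per packet, the poll loop takes the buffer at ntc, bails if the slot was never filled, syncs and validates the hardware descriptor, drops bad packets by freeing the whole buffer, and otherwise unmaps, strips the descriptor head plus NET_IP_ALIGN padding with skb_reserve(), sets the payload length with skb_put(), resolves the protocol, and hands the skb to GRO; ownership moves to the stack, so buffer->skb is cleared before the slot is reused. A hedged skeleton of the loop; the budget handling, pkt_len extraction (HBG_RX_DESC_W2_PKT_LEN_M is an invented mask name), per-iteration refill, and ring advance are assumptions:

        /* Hedged skeleton around the matched lines. */
        static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
        {
                struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); /* assumed */
                struct hbg_priv *priv = ring->priv;          /* assumed back-pointer */
                struct hbg_rx_desc *rx_desc;
                struct hbg_buffer *buffer;
                int packet_done = 0;
                u32 pkt_len;

                while (packet_done < budget) {
                        buffer = &ring->queue[ring->ntc];
                        if (unlikely(!buffer->skb))
                                break;                       /* slot never filled */

                        if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
                                break;                       /* HW not done with it */

                        rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
                        pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M,
                                            rx_desc->word2); /* assumed field and mask */

                        if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
                                hbg_buffer_free(buffer);     /* drop bad packet */
                                goto next;
                        }

                        hbg_dma_unmap(buffer);
                        /* Skip the HW descriptor and realign the IP header. */
                        skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
                        skb_put(buffer->skb, pkt_len);
                        buffer->skb->protocol = eth_type_trans(buffer->skb,
                                                               priv->netdev);
                        napi_gro_receive(napi, buffer->skb);
                        buffer->skb = NULL;                  /* stack owns it now */
        next:
                        hbg_rx_fill_one_buffer(priv);        /* assumed: refill as we go */
                        ring->ntc = (ring->ntc + 1) % ring->len;
                        packet_done++;
                }

                if (packet_done < budget)
                        napi_complete_done(napi, packet_done);

                return packet_done;
        }
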
In hbg_ring_uninit():
463         struct hbg_buffer *buffer;
473         buffer = &ring->queue[i];
474         hbg_buffer_free(buffer);
475         buffer->ring = NULL;
476         buffer->priv = NULL;

In hbg_ring_init():
492         struct hbg_buffer *buffer;
503         buffer = &ring->queue[i];
504         buffer->skb_len = 0;
505         buffer->dir = dir;
506         buffer->ring = ring;
507         buffer->priv = priv;
508         buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
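
Line 508 is the detail that ties the TX path together: the hbg_buffer array itself lives in DMA-visible memory (queue_dma is its device address), so each buffer's state_dma points at its own array entry. That is the address placed in tx_desc->word3 at line 82, which lets the hardware write completion state directly into the structure that hbg_napi_tx_recycle() polls. A hedged sketch of the init loop; the dma_alloc_coherent() allocation, the signature, the enum name hbg_dir, and the extra ring fields are assumptions, only the per-buffer setup comes from the matched lines:

        /* Hedged sketch of ring setup. */
        static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
                                 enum hbg_dir dir, u32 len)       /* signature assumed */
        {
                struct hbg_buffer *buffer;
                u32 i;

                /* The buffer array is device-visible so HW can write each
                 * buffer's state word in place. */
                ring->queue = dma_alloc_coherent(&priv->pdev->dev,
                                                 len * sizeof(*ring->queue),
                                                 &ring->queue_dma, GFP_KERNEL);
                if (!ring->queue)
                        return -ENOMEM;

                for (i = 0; i < len; i++) {
                        buffer = &ring->queue[i];
                        buffer->skb_len = 0;
                        buffer->dir = dir;
                        buffer->ring = ring;
                        buffer->priv = priv;
                        /* Device address of this hbg_buffer entry itself. */
                        buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
                }

                ring->len = len;   /* assumed */
                ring->ntu = 0;     /* assumed */
                ring->ntc = 0;     /* assumed */
                return 0;
        }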