Lines matching refs: buffer

13 #define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
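Only the first line of the macro contains "buffer", so its continuation is cut off in the listing. A minimal sketch of the full macro, assuming the obvious mapping of buffer direction to DMA direction:

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)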
34 static int hbg_dma_map(struct hbg_buffer *buffer)
36 struct hbg_priv *priv = buffer->priv;
38 buffer->skb_dma = dma_map_single(&priv->pdev->dev,
39 buffer->skb->data, buffer->skb_len,
40 buffer_to_dma_dir(buffer));
41 if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
42 if (buffer->dir == HBG_DIR_RX)
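Pieced together, the matches above form hbg_dma_map(): map the skb data for streaming DMA in the direction implied by the buffer, and bail out if the mapping fails. A sketch of the whole helper; the per-direction error counters and the -ENOMEM return are assumptions, only the matched lines are confirmed by the listing.

static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
		if (buffer->dir == HBG_DIR_RX)
			priv->stats.rx_dma_err_cnt++;	/* assumed counter name */
		else
			priv->stats.tx_dma_err_cnt++;	/* assumed counter name */
		return -ENOMEM;				/* assumed error code */
	}

	return 0;
}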
53 static void hbg_dma_unmap(struct hbg_buffer *buffer)
55 struct hbg_priv *priv = buffer->priv;
57 if (unlikely(!buffer->skb_dma))
60 dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
61 buffer_to_dma_dir(buffer));
62 buffer->skb_dma = 0;
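The unmap path is the mirror image: skip buffers that were never mapped, undo the streaming mapping, and clear skb_dma so a second unmap is a no-op. Only the early return is filled in here.

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}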
65 static void hbg_init_tx_desc(struct hbg_buffer *buffer,
68 u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
73 if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
80 buffer->skb->len);
81 tx_desc->word2 = buffer->skb_dma;
82 tx_desc->word3 = buffer->state_dma;
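hbg_init_tx_desc() builds the descriptor words from the buffer: control bits (including L3/L4 checksum offload when the skb arrives with CHECKSUM_PARTIAL), the send length, the skb DMA address, and the DMA address of the buffer's state word so the hardware can report completion. A sketch of the shape of the function; the second parameter type and the HBG_TX_DESC_* field macros are assumed names, not confirmed by the listing.

static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	/* assumed field macros; the listing only shows the checksum test */
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_L3_CS_B, 1);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_L4_CS_B, 1);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;	/* packet data address */
	tx_desc->word3 = buffer->state_dma;	/* HW writes TX state here */
}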
93 struct hbg_buffer *buffer;
109 buffer = &ring->queue[ntu];
110 buffer->skb = skb;
111 buffer->skb_len = skb->len;
112 if (unlikely(hbg_dma_map(buffer))) {
117 buffer->state = HBG_TX_STATE_START;
118 hbg_init_tx_desc(buffer, &tx_desc);
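On transmit, the driver claims the next-to-use slot in the TX ring, attaches the skb, DMA-maps it, marks the slot in-flight, and fills a descriptor from it. A sketch of that sequence, assuming the surrounding ndo_start_xmit context; the drop handling and the call that pushes the descriptor to hardware are assumptions.

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		buffer->skb = NULL;
		dev_kfree_skb_any(skb);		/* assumed drop path */
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);	/* assumed: hand descriptor to HW */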
129 static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
131 if (unlikely(!buffer->skb))
134 dev_kfree_skb_any(buffer->skb);
135 buffer->skb = NULL;
138 static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
140 u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
141 struct hbg_priv *priv = buffer->priv;
143 buffer->skb = netdev_alloc_skb(priv->netdev, len);
144 if (unlikely(!buffer->skb))
147 buffer->skb_len = len;
148 memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
152 static void hbg_buffer_free(struct hbg_buffer *buffer)
154 hbg_dma_unmap(buffer);
155 hbg_buffer_free_skb(buffer);
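The buffer lifecycle helpers are almost fully visible in the matches: hbg_buffer_free_skb() drops the skb if one is attached, hbg_buffer_alloc_skb() allocates a max-frame-sized skb and zeroes the packet head area (which the RX path later reads as the hardware-written descriptor), and hbg_buffer_free() unmaps and frees in one call. Only the returns and braces are filled in below.

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
{
	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
	struct hbg_priv *priv = buffer->priv;

	buffer->skb = netdev_alloc_skb(priv->netdev, len);
	if (unlikely(!buffer->skb))
		return -ENOMEM;

	buffer->skb_len = len;
	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
	return 0;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	hbg_dma_unmap(buffer);
	hbg_buffer_free_skb(buffer);
}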
166 struct hbg_buffer *buffer;
182 buffer = &ring->queue[ntc];
183 if (buffer->state != HBG_TX_STATE_COMPLETE)
186 hbg_buffer_free(buffer);
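TX completion walks the ring from next-to-clean and stops at the first buffer the hardware has not yet marked HBG_TX_STATE_COMPLETE; completed buffers are released via hbg_buffer_free(). A sketch of that loop, assuming a NAPI recycle context; the budget accounting and the ring-advance arithmetic (ring->len) are assumptions.

	while (packet_done < budget) {
		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = (ntc + 1) % ring->len;	/* assumed ring-size field */
		packet_done++;
	}
	ring->ntc = ntc;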
374 struct hbg_buffer *buffer;
380 buffer = &ring->queue[ring->ntu];
381 ret = hbg_buffer_alloc_skb(buffer);
385 ret = hbg_dma_map(buffer);
387 hbg_buffer_free_skb(buffer);
391 hbg_hw_fill_buffer(priv, buffer->skb_dma);
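Refilling one RX slot means allocating an skb for the next-to-use buffer, mapping it for DMA, and handing the DMA address to the hardware with hbg_hw_fill_buffer(); if the mapping fails the freshly allocated skb is released again. A sketch of the refill helper; the function name, ring lookup, and ring advance are assumptions.

static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)	/* assumed name */
{
	struct hbg_ring *ring = &priv->rx_ring;	/* assumed field name */
	struct hbg_buffer *buffer;
	int ret;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_skb(buffer);
	if (unlikely(ret))
		return ret;

	ret = hbg_dma_map(buffer);
	if (unlikely(ret)) {
		hbg_buffer_free_skb(buffer);
		return ret;
	}

	hbg_hw_fill_buffer(priv, buffer->skb_dma);
	ring->ntu = (ring->ntu + 1) % ring->len;	/* assumed ring advance */
	return 0;
}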
397 struct hbg_buffer *buffer)
404 dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
405 buffer->skb_len, DMA_FROM_DEVICE);
407 rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
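Before trusting the buffer contents, the RX path syncs the streaming mapping back to the CPU and looks at the descriptor the hardware wrote at the start of the skb data. Sketch; the bool return and the "descriptor valid" test are assumptions, only the sync and the cast appear in the listing.

static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* give the CPU a coherent view of the device-written data */
	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
				buffer->skb_len, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	/* assumed test and field macro: a nonzero length means HW filled it */
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}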
416 struct hbg_buffer *buffer;
424 buffer = &ring->queue[ring->ntc];
425 if (unlikely(!buffer->skb))
428 if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
430 rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
433 if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
434 hbg_buffer_free(buffer);
438 hbg_dma_unmap(buffer);
439 skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
440 skb_put(buffer->skb, pkt_len);
441 buffer->skb->protocol = eth_type_trans(buffer->skb,
445 napi_gro_receive(napi, buffer->skb);
446 buffer->skb = NULL;
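The RX poll loop consumes from next-to-clean: skip slots without an skb, stop when the hardware has not yet filled the next buffer, validate the packet (freeing the buffer on a bad frame), then unmap, strip the hardware head area, set length and protocol, and hand the skb to GRO. A sketch of the per-packet body; the loop framing, the pkt_len extraction, and the labels are assumptions.

	buffer = &ring->queue[ring->ntc];
	if (unlikely(!buffer->skb))
		goto next_buffer;			/* assumed label */

	if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
		break;					/* HW not done with this slot */

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);	/* assumed field macro */

	if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
		hbg_buffer_free(buffer);
		goto next_buffer;			/* assumed label */
	}

	hbg_dma_unmap(buffer);
	skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
	skb_put(buffer->skb, pkt_len);
	buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev);

	napi_gro_receive(napi, buffer->skb);
	buffer->skb = NULL;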
463 struct hbg_buffer *buffer;
473 buffer = &ring->queue[i];
474 hbg_buffer_free(buffer);
475 buffer->ring = NULL;
476 buffer->priv = NULL;
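Tearing a ring down frees every buffer (DMA unmap plus skb free) and severs the back-pointers into the ring and priv structures. Only the loop framing is filled in; whether the coherent queue array is released here or by a managed (devm/dmam) allocation is not visible in the listing.

	for (i = 0; i < ring->len; i++) {	/* assumed ring-size field */
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}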
492 struct hbg_buffer *buffer;
503 buffer = &ring->queue[i];
504 buffer->skb_len = 0;
505 buffer->dir = dir;
506 buffer->ring = ring;
507 buffer->priv = priv;
508 buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
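Ring init wires every buffer to its ring, priv, and direction, and derives state_dma from the ring's DMA base address: the queue array evidently lives in coherent DMA memory, so each element's offset doubles as the address the hardware uses to write per-buffer state. A sketch, assuming a dmam_alloc_coherent()-backed queue; the allocation call and the len parameter are assumptions.

	ring->queue = dmam_alloc_coherent(&priv->pdev->dev,
					  len * sizeof(*ring->queue),
					  &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		/* HW writes completion state back into the coherent array */
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}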