Lines Matching: local-bd-address
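The matched lines below appear to come from the AMD XGBE Ethernet driver's descriptor-ring management code, drivers/net/ethernet/amd/xgbe/xgbe-desc.c in the Linux kernel; the trailing "in <function>()" column names the enclosing function for each line.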
118 #include "xgbe-common.h"
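xgbe_free_ring() tears down one descriptor ring: it unmaps every per-descriptor buffer and frees the rdata array, drops the shared RX header and RX data page allocations (dma_unmap_page() plus put_page(), then the bookkeeping fields are zeroed), and finally releases the coherent descriptor memory with dma_free_coherent().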
131 if (ring->rdata) { in xgbe_free_ring()
132 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_free_ring()
137 kfree(ring->rdata); in xgbe_free_ring()
138 ring->rdata = NULL; in xgbe_free_ring()
141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
142 dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, in xgbe_free_ring()
143 ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); in xgbe_free_ring()
144 put_page(ring->rx_hdr_pa.pages); in xgbe_free_ring()
146 ring->rx_hdr_pa.pages = NULL; in xgbe_free_ring()
147 ring->rx_hdr_pa.pages_len = 0; in xgbe_free_ring()
148 ring->rx_hdr_pa.pages_offset = 0; in xgbe_free_ring()
149 ring->rx_hdr_pa.pages_dma = 0; in xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { in xgbe_free_ring()
153 dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, in xgbe_free_ring()
154 ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); in xgbe_free_ring()
155 put_page(ring->rx_buf_pa.pages); in xgbe_free_ring()
157 ring->rx_buf_pa.pages = NULL; in xgbe_free_ring()
158 ring->rx_buf_pa.pages_len = 0; in xgbe_free_ring()
159 ring->rx_buf_pa.pages_offset = 0; in xgbe_free_ring()
160 ring->rx_buf_pa.pages_dma = 0; in xgbe_free_ring()
163 if (ring->rdesc) { in xgbe_free_ring()
164 dma_free_coherent(pdata->dev, in xgbe_free_ring()
165 (sizeof(struct xgbe_ring_desc) * in xgbe_free_ring()
166 ring->rdesc_count), in xgbe_free_ring()
167 ring->rdesc, ring->rdesc_dma); in xgbe_free_ring()
168 ring->rdesc = NULL; in xgbe_free_ring()
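xgbe_free_ring_resources() simply walks every channel and calls xgbe_free_ring() on its Tx and Rx ring.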
177 DBGPR("-->xgbe_free_ring_resources\n"); in xgbe_free_ring_resources()
179 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_ring_resources()
180 channel = pdata->channel[i]; in xgbe_free_ring_resources()
181 xgbe_free_ring(pdata, channel->tx_ring); in xgbe_free_ring_resources()
182 xgbe_free_ring(pdata, channel->rx_ring); in xgbe_free_ring_resources()
185 DBGPR("<--xgbe_free_ring_resources\n"); in xgbe_free_ring_resources()
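xgbe_init_ring() performs the matching allocation for one ring: a DMA-coherent array of rdesc_count hardware descriptors obtained with xgbe_dma_alloc_node() on the ring's NUMA node, plus a plain rdata array for the driver's per-descriptor bookkeeping, returning -ENOMEM if either allocation fails.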
226 ring->rdesc_count = rdesc_count; in xgbe_init_ring()
227 ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma, in xgbe_init_ring()
228 ring->node); in xgbe_init_ring()
229 if (!ring->rdesc) in xgbe_init_ring()
230 return -ENOMEM; in xgbe_init_ring()
235 ring->rdata = xgbe_alloc_node(size, ring->node); in xgbe_init_ring()
236 if (!ring->rdata) in xgbe_init_ring()
237 return -ENOMEM; in xgbe_init_ring()
239 netif_dbg(pdata, drv, pdata->netdev, in xgbe_init_ring()
241 ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node); in xgbe_init_ring()
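xgbe_alloc_ring_resources() loops over the channels and initializes each channel's Tx and Rx ring with pdata->tx_desc_count and pdata->rx_desc_count descriptors respectively, logging a netdev_alert() and bailing out if xgbe_init_ring() fails.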
252 for (i = 0; i < pdata->channel_count; i++) { in xgbe_alloc_ring_resources()
253 channel = pdata->channel[i]; in xgbe_alloc_ring_resources()
254 netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", in xgbe_alloc_ring_resources()
255 channel->name); in xgbe_alloc_ring_resources()
257 ret = xgbe_init_ring(pdata, channel->tx_ring, in xgbe_alloc_ring_resources()
258 pdata->tx_desc_count); in xgbe_alloc_ring_resources()
260 netdev_alert(pdata->netdev, in xgbe_alloc_ring_resources()
265 netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", in xgbe_alloc_ring_resources()
266 channel->name); in xgbe_alloc_ring_resources()
268 ret = xgbe_init_ring(pdata, channel->rx_ring, in xgbe_alloc_ring_resources()
269 pdata->rx_desc_count); in xgbe_alloc_ring_resources()
271 netdev_alert(pdata->netdev, in xgbe_alloc_ring_resources()
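xgbe_alloc_pages() obtains a block of pages for RX buffers: it tries the requested order on the ring's NUMA node, decrementing the order on each failure, retries with NUMA_NO_NODE if local pages cannot be had, then DMA-maps the whole block and records the page pointer, length, offset and DMA address in the xgbe_page_alloc tracker.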
304 order--; in xgbe_alloc_pages()
307 /* If we couldn't get local pages, try getting from anywhere */ in xgbe_alloc_pages()
314 return -ENOMEM; in xgbe_alloc_pages()
317 pages_dma = dma_map_page(pdata->dev, pages, 0, in xgbe_alloc_pages()
318 PAGE_SIZE << order, DMA_FROM_DEVICE); in xgbe_alloc_pages()
319 if (dma_mapping_error(pdata->dev, pages_dma)) { in xgbe_alloc_pages()
321 return -ENOMEM; in xgbe_alloc_pages()
324 pa->pages = pages; in xgbe_alloc_pages()
325 pa->pages_len = PAGE_SIZE << order; in xgbe_alloc_pages()
326 pa->pages_offset = 0; in xgbe_alloc_pages()
327 pa->pages_dma = pages_dma; in xgbe_alloc_pages()
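xgbe_set_buffer_data() carves one receive buffer out of that page block: it takes a page reference, copies the allocator state into the buffer descriptor (bd) together with the base DMA address, offset and length, then advances pages_offset. Once another buffer of the same size would no longer fit, the allocator's own reference is handed to bd->pa_unmap (the descriptor that will eventually unmap the block) and the allocator is reset so the next caller starts a fresh block.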
332 static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, in xgbe_set_buffer_data() argument
336 get_page(pa->pages); in xgbe_set_buffer_data()
337 bd->pa = *pa; in xgbe_set_buffer_data()
339 bd->dma_base = pa->pages_dma; in xgbe_set_buffer_data()
340 bd->dma_off = pa->pages_offset; in xgbe_set_buffer_data()
341 bd->dma_len = len; in xgbe_set_buffer_data()
343 pa->pages_offset += len; in xgbe_set_buffer_data()
344 if ((pa->pages_offset + len) > pa->pages_len) { in xgbe_set_buffer_data()
346 bd->pa_unmap = *pa; in xgbe_set_buffer_data()
349 pa->pages = NULL; in xgbe_set_buffer_data()
350 pa->pages_len = 0; in xgbe_set_buffer_data()
351 pa->pages_offset = 0; in xgbe_set_buffer_data()
352 pa->pages_dma = 0; in xgbe_set_buffer_data()
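xgbe_map_rx_buffer() strings these pieces together for one RX descriptor: it refills the header page allocator (order 0) and the data page allocator (PAGE_ALLOC_COSTLY_ORDER) if they are empty, then carves a header buffer, sized XGBE_SKB_ALLOC_SIZE when NETIF_F_RXCSUM is enabled or the full pdata->rx_buf_size otherwise, and a data buffer of pdata->rx_buf_size.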
362 if (!ring->rx_hdr_pa.pages) { in xgbe_map_rx_buffer()
363 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node); in xgbe_map_rx_buffer()
368 if (!ring->rx_buf_pa.pages) { in xgbe_map_rx_buffer()
369 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, in xgbe_map_rx_buffer()
370 PAGE_ALLOC_COSTLY_ORDER, ring->node); in xgbe_map_rx_buffer()
376 if (pdata->netdev->features & NETIF_F_RXCSUM) { in xgbe_map_rx_buffer()
377 xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, in xgbe_map_rx_buffer()
378 XGBE_SKB_ALLOC_SIZE); in xgbe_map_rx_buffer()
379 } else { in xgbe_map_rx_buffer()
380 xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, in xgbe_map_rx_buffer()
381 pdata->rx_buf_size); in xgbe_map_rx_buffer()
385 xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, in xgbe_map_rx_buffer()
386 pdata->rx_buf_size); in xgbe_map_rx_buffer()
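Taken together, xgbe_alloc_pages(), xgbe_set_buffer_data() and xgbe_map_rx_buffer() implement a small shared-page allocator for RX buffers. Below is a minimal, self-contained sketch of the same idea (illustrative only, not the driver's code; the struct and function names are invented and DMA mapping is omitted): allocate a block at the highest order that succeeds, hand out slices with one page reference each, and drop the producer's reference once the block is used up.

/* Illustrative sketch only - simplified from the pattern above. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct rx_page_pool {
	struct page *pages;	/* current block, NULL when exhausted */
	unsigned int len;	/* size of the block in bytes */
	unsigned int offset;	/* next free byte within the block */
};

static int rx_pool_refill(struct rx_page_pool *pp, int order, int node)
{
	struct page *pages = NULL;

	/* Try the requested order first, falling back to smaller blocks */
	while (order >= 0) {
		pages = alloc_pages_node(node, GFP_ATOMIC | __GFP_COMP |
					 __GFP_NOWARN, order);
		if (pages)
			break;
		order--;
	}
	if (!pages)
		return -ENOMEM;

	pp->pages = pages;
	pp->len = PAGE_SIZE << order;
	pp->offset = 0;
	return 0;
}

/* Hand out one slice of 'len' bytes; the caller must refill the pool first. */
static struct page *rx_pool_take(struct rx_page_pool *pp, unsigned int len,
				 unsigned int *offset)
{
	struct page *page = pp->pages;

	get_page(page);			/* one reference per slice handed out */
	*offset = pp->offset;
	pp->offset += len;

	if (pp->offset + len > pp->len) {
		/* No room for another slice: drop the pool's own reference
		 * so the block is freed once every slice has been released,
		 * and force a refill on the next call.
		 */
		put_page(page);
		pp->pages = NULL;
	}
	return page;
}

The driver's version additionally keeps the dma_map_page() mapping of the whole block alive until the last slice is released, which is what the pa/pa_unmap pair in the excerpts above tracks. The next excerpt, xgbe_wrapper_tx_descriptor_init(), walks every Tx ring, points each rdata entry at its hardware descriptor and DMA address, resets the cur and dirty indices, clears the ring's Tx state, and has the hardware layer program the ring through hw_if->tx_desc_init().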
393 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_wrapper_tx_descriptor_init()
401 DBGPR("-->xgbe_wrapper_tx_descriptor_init\n"); in xgbe_wrapper_tx_descriptor_init()
403 for (i = 0; i < pdata->channel_count; i++) { in xgbe_wrapper_tx_descriptor_init()
404 channel = pdata->channel[i]; in xgbe_wrapper_tx_descriptor_init()
405 ring = channel->tx_ring; in xgbe_wrapper_tx_descriptor_init()
409 rdesc = ring->rdesc; in xgbe_wrapper_tx_descriptor_init()
410 rdesc_dma = ring->rdesc_dma; in xgbe_wrapper_tx_descriptor_init()
412 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_wrapper_tx_descriptor_init()
415 rdata->rdesc = rdesc; in xgbe_wrapper_tx_descriptor_init()
416 rdata->rdesc_dma = rdesc_dma; in xgbe_wrapper_tx_descriptor_init()
422 ring->cur = 0; in xgbe_wrapper_tx_descriptor_init()
423 ring->dirty = 0; in xgbe_wrapper_tx_descriptor_init()
424 memset(&ring->tx, 0, sizeof(ring->tx)); in xgbe_wrapper_tx_descriptor_init()
426 hw_if->tx_desc_init(channel); in xgbe_wrapper_tx_descriptor_init()
429 DBGPR("<--xgbe_wrapper_tx_descriptor_init\n"); in xgbe_wrapper_tx_descriptor_init()
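xgbe_wrapper_rx_descriptor_init() mirrors the Tx version for the Rx rings; besides wiring up rdesc/rdesc_dma for each entry, the elided lines call xgbe_map_rx_buffer() so every descriptor starts out with mapped header and data buffers, after which the ring is handed to hw_if->rx_desc_init().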
434 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_wrapper_rx_descriptor_init()
442 DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); in xgbe_wrapper_rx_descriptor_init()
444 for (i = 0; i < pdata->channel_count; i++) { in xgbe_wrapper_rx_descriptor_init()
445 channel = pdata->channel[i]; in xgbe_wrapper_rx_descriptor_init()
446 ring = channel->rx_ring; in xgbe_wrapper_rx_descriptor_init()
450 rdesc = ring->rdesc; in xgbe_wrapper_rx_descriptor_init()
451 rdesc_dma = ring->rdesc_dma; in xgbe_wrapper_rx_descriptor_init()
453 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_wrapper_rx_descriptor_init()
456 rdata->rdesc = rdesc; in xgbe_wrapper_rx_descriptor_init()
457 rdata->rdesc_dma = rdesc_dma; in xgbe_wrapper_rx_descriptor_init()
466 ring->cur = 0; in xgbe_wrapper_rx_descriptor_init()
467 ring->dirty = 0; in xgbe_wrapper_rx_descriptor_init()
469 hw_if->rx_desc_init(channel); in xgbe_wrapper_rx_descriptor_init()
472 DBGPR("<--xgbe_wrapper_rx_descriptor_init\n"); in xgbe_wrapper_rx_descriptor_init()
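xgbe_unmap_rdata() is the per-descriptor cleanup used by both paths: it removes the Tx DMA mapping (dma_unmap_page() or dma_unmap_single() depending on mapped_as_page), frees any attached skb, releases the RX header and data page references, performs the deferred dma_unmap_page() for any pa_unmap entry, and clears the tx/rx state including saved partial-packet state.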
478 if (rdata->skb_dma) { in xgbe_unmap_rdata()
479 if (rdata->mapped_as_page) { in xgbe_unmap_rdata()
480 dma_unmap_page(pdata->dev, rdata->skb_dma, in xgbe_unmap_rdata()
481 rdata->skb_dma_len, DMA_TO_DEVICE); in xgbe_unmap_rdata()
483 dma_unmap_single(pdata->dev, rdata->skb_dma, in xgbe_unmap_rdata()
484 rdata->skb_dma_len, DMA_TO_DEVICE); in xgbe_unmap_rdata()
486 rdata->skb_dma = 0; in xgbe_unmap_rdata()
487 rdata->skb_dma_len = 0; in xgbe_unmap_rdata()
490 if (rdata->skb) { in xgbe_unmap_rdata()
491 dev_kfree_skb_any(rdata->skb); in xgbe_unmap_rdata()
492 rdata->skb = NULL; in xgbe_unmap_rdata()
495 if (rdata->rx.hdr.pa.pages) in xgbe_unmap_rdata()
496 put_page(rdata->rx.hdr.pa.pages); in xgbe_unmap_rdata()
498 if (rdata->rx.hdr.pa_unmap.pages) { in xgbe_unmap_rdata()
499 dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma, in xgbe_unmap_rdata()
500 rdata->rx.hdr.pa_unmap.pages_len, in xgbe_unmap_rdata()
502 put_page(rdata->rx.hdr.pa_unmap.pages); in xgbe_unmap_rdata()
505 if (rdata->rx.buf.pa.pages) in xgbe_unmap_rdata()
506 put_page(rdata->rx.buf.pa.pages); in xgbe_unmap_rdata()
508 if (rdata->rx.buf.pa_unmap.pages) { in xgbe_unmap_rdata()
509 dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma, in xgbe_unmap_rdata()
510 rdata->rx.buf.pa_unmap.pages_len, in xgbe_unmap_rdata()
512 put_page(rdata->rx.buf.pa_unmap.pages); in xgbe_unmap_rdata()
515 memset(&rdata->tx, 0, sizeof(rdata->tx)); in xgbe_unmap_rdata()
516 memset(&rdata->rx, 0, sizeof(rdata->rx)); in xgbe_unmap_rdata()
518 rdata->mapped_as_page = 0; in xgbe_unmap_rdata()
520 if (rdata->state_saved) { in xgbe_unmap_rdata()
521 rdata->state_saved = 0; in xgbe_unmap_rdata()
522 rdata->state.skb = NULL; in xgbe_unmap_rdata()
523 rdata->state.len = 0; in xgbe_unmap_rdata()
524 rdata->state.error = 0; in xgbe_unmap_rdata()
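xgbe_map_tx_skb() maps a transmit skb onto consecutive ring entries: an extra entry is skipped for a context descriptor when the TSO MSS or VLAN tag differs from the ring's current values, the TSO header (packet->header_len) is mapped with dma_map_single(), the remaining linear data is mapped in chunks of at most XGBE_TX_MAX_BUF_SIZE, and each page fragment is mapped with skb_frag_dma_map(). The skb pointer is stored in the last entry used, and the function returns the number of descriptors consumed; on any mapping failure it unwinds the already-mapped entries through xgbe_unmap_rdata() and returns 0.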
530 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_map_tx_skb()
531 struct xgbe_ring *ring = channel->tx_ring; in xgbe_map_tx_skb()
540 DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur); in xgbe_map_tx_skb()
543 start_index = ring->cur; in xgbe_map_tx_skb()
544 cur_index = ring->cur; in xgbe_map_tx_skb()
546 packet = &ring->packet_data; in xgbe_map_tx_skb()
547 packet->rdesc_count = 0; in xgbe_map_tx_skb()
548 packet->length = 0; in xgbe_map_tx_skb()
550 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_map_tx_skb()
552 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_map_tx_skb()
556 if ((tso && (packet->mss != ring->tx.cur_mss)) || in xgbe_map_tx_skb()
557 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))) in xgbe_map_tx_skb()
563 skb_dma = dma_map_single(pdata->dev, skb->data, in xgbe_map_tx_skb()
564 packet->header_len, DMA_TO_DEVICE); in xgbe_map_tx_skb()
565 if (dma_mapping_error(pdata->dev, skb_dma)) { in xgbe_map_tx_skb()
566 netdev_alert(pdata->netdev, "dma_map_single failed\n"); in xgbe_map_tx_skb()
569 rdata->skb_dma = skb_dma; in xgbe_map_tx_skb()
570 rdata->skb_dma_len = packet->header_len; in xgbe_map_tx_skb()
571 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_map_tx_skb()
573 cur_index, &skb_dma, packet->header_len); in xgbe_map_tx_skb()
575 offset = packet->header_len; in xgbe_map_tx_skb()
577 packet->length += packet->header_len; in xgbe_map_tx_skb()
584 for (datalen = skb_headlen(skb) - offset; datalen; ) { in xgbe_map_tx_skb()
587 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, in xgbe_map_tx_skb()
589 if (dma_mapping_error(pdata->dev, skb_dma)) { in xgbe_map_tx_skb()
590 netdev_alert(pdata->netdev, "dma_map_single failed\n"); in xgbe_map_tx_skb()
593 rdata->skb_dma = skb_dma; in xgbe_map_tx_skb()
594 rdata->skb_dma_len = len; in xgbe_map_tx_skb()
595 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_map_tx_skb()
599 datalen -= len; in xgbe_map_tx_skb()
602 packet->length += len; in xgbe_map_tx_skb()
608 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in xgbe_map_tx_skb()
609 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_map_tx_skb()
612 frag = &skb_shinfo(skb)->frags[i]; in xgbe_map_tx_skb()
619 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, in xgbe_map_tx_skb()
621 if (dma_mapping_error(pdata->dev, skb_dma)) { in xgbe_map_tx_skb()
622 netdev_alert(pdata->netdev, in xgbe_map_tx_skb()
626 rdata->skb_dma = skb_dma; in xgbe_map_tx_skb()
627 rdata->skb_dma_len = len; in xgbe_map_tx_skb()
628 rdata->mapped_as_page = 1; in xgbe_map_tx_skb()
629 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_map_tx_skb()
633 datalen -= len; in xgbe_map_tx_skb()
636 packet->length += len; in xgbe_map_tx_skb()
643 /* Save the skb address in the last entry. We always have some data in xgbe_map_tx_skb()
644 * that has been mapped so rdata is always advanced past the last in xgbe_map_tx_skb()
645 * piece of mapped data - use the entry pointed to by cur_index - 1. in xgbe_map_tx_skb()
647 rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); in xgbe_map_tx_skb()
648 rdata->skb = skb; in xgbe_map_tx_skb()
651 packet->rdesc_count = cur_index - start_index; in xgbe_map_tx_skb()
653 DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count); in xgbe_map_tx_skb()
655 return packet->rdesc_count; in xgbe_map_tx_skb()
663 DBGPR("<--xgbe_map_tx_skb: count=0\n"); in xgbe_map_tx_skb()
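Finally, xgbe_init_function_ptrs_desc() publishes these routines through the driver's descriptor interface so the rest of the driver only ever calls them via desc_if.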
670 DBGPR("-->xgbe_init_function_ptrs_desc\n"); in xgbe_init_function_ptrs_desc()
672 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; in xgbe_init_function_ptrs_desc()
673 desc_if->free_ring_resources = xgbe_free_ring_resources; in xgbe_init_function_ptrs_desc()
674 desc_if->map_tx_skb = xgbe_map_tx_skb; in xgbe_init_function_ptrs_desc()
675 desc_if->map_rx_buffer = xgbe_map_rx_buffer; in xgbe_init_function_ptrs_desc()
676 desc_if->unmap_rdata = xgbe_unmap_rdata; in xgbe_init_function_ptrs_desc()
677 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; in xgbe_init_function_ptrs_desc()
678 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; in xgbe_init_function_ptrs_desc()
680 DBGPR("<--xgbe_init_function_ptrs_desc\n"); in xgbe_init_function_ptrs_desc()
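For reference, a sketch of the interface being filled in, with signatures inferred from the assignments above (the real struct xgbe_desc_if lives in xgbe.h; field order and exact prototypes here are assumptions, and the driver's own types from xgbe.h are presumed available):

/* Sketch only - inferred from the assignments above, not copied from xgbe.h. */
struct xgbe_desc_if {
	int (*alloc_ring_resources)(struct xgbe_prv_data *pdata);
	void (*free_ring_resources)(struct xgbe_prv_data *pdata);
	int (*map_tx_skb)(struct xgbe_channel *channel, struct sk_buff *skb);
	int (*map_rx_buffer)(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring,
			     struct xgbe_ring_data *rdata);
	void (*unmap_rdata)(struct xgbe_prv_data *pdata,
			    struct xgbe_ring_data *rdata);
	void (*wrapper_tx_desc_init)(struct xgbe_prv_data *pdata);
	void (*wrapper_rx_desc_init)(struct xgbe_prv_data *pdata);
};

/* Typical call site elsewhere in the driver (illustrative):
 *
 *	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 *
 *	ret = desc_if->alloc_ring_resources(pdata);
 *	if (!ret)
 *		desc_if->wrapper_tx_desc_init(pdata);
 */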