Lines matching refs: unmap_q (from the Brocade BNA Ethernet driver, drivers/net/ethernet/brocade/bna/bnad.c)

88 /* Caller should ensure that the entry at unmap_q[index] is valid */
91 struct bnad_tx_unmap *unmap_q,
98 unmap = &unmap_q[index];
116 unmap = &unmap_q[index];
140 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
145 skb = unmap_q[i].skb;
148 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
164 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
181 unmap = &unmap_q[cons];
191 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
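
Lines 88-191 show the TX-side pattern: a flat unmap array with one entry per descriptor slot, walked with a wrapping index. bnad_tx_buff_unmap() frees the skb recorded at unmap_q[index], releases its DMA mappings, and returns the slot after the last fragment, which the completion loop at line 191 feeds back in as the new consumer index. A minimal user-space sketch of that contract follows; it is a model, not the driver code (the dma_unmap_* calls are elided, and it simplifies to one fragment per slot where the driver packs several vectors per entry).

#include <stdio.h>
#include <stdlib.h>

/* One slot of the TX unmap array: the skb posted at this descriptor
 * and the number of DMA-mapped fragments that travel with it. */
struct tx_unmap {
	void *skb;           /* stand-in for struct sk_buff * */
	unsigned int nvecs;  /* head fragment + page fragments */
};

/* Release everything recorded at unmap_q[index] and return the slot
 * after the last fragment, i.e. the new consumer index. The driver's
 * dma_unmap_single()/dma_unmap_page() calls are elided here. */
static unsigned int
tx_buff_unmap(struct tx_unmap *unmap_q, unsigned int q_depth,
	      unsigned int index)
{
	struct tx_unmap *unmap = &unmap_q[index];
	unsigned int vec;

	free(unmap->skb);            /* dev_kfree_skb_any() in the kernel */
	unmap->skb = NULL;

	for (vec = 0; vec < unmap->nvecs; vec++)
		index = (index + 1) % q_depth;  /* skip the mapped fragments */
	unmap->nvecs = 0;

	return index;
}

int main(void)
{
	enum { Q_DEPTH = 8 };
	struct tx_unmap unmap_q[Q_DEPTH] = { { NULL, 0 } };
	unsigned int cons = 0;

	unmap_q[0].skb = malloc(64);  /* pretend a 3-vector skb at slot 0 */
	unmap_q[0].nvecs = 3;

	cons = tx_buff_unmap(unmap_q, Q_DEPTH, cons);
	printf("consumer index advanced to %u\n", cons);
	return 0;
}
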
250 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
252 unmap_q->reuse_pi = -1;
253 unmap_q->alloc_order = -1;
254 unmap_q->map_size = 0;
255 unmap_q->type = BNAD_RXBUF_NONE;
262 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
269 unmap_q->type = BNAD_RXBUF_PAGE;
272 unmap_q->alloc_order = 0;
273 unmap_q->map_size = rcb->rxq->buffer_size;
276 unmap_q->alloc_order = 0;
277 unmap_q->map_size = rcb->rxq->buffer_size;
278 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
280 unmap_q->alloc_order = order;
281 unmap_q->map_size =
287 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
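
Lines 250-287 are the receive-queue (re)initialization: reuse_pi is reset to -1, and alloc_order/map_size are chosen from the buffer size so that one page allocation of order alloc_order divides evenly into map_size slices, the invariant asserted at line 287 by BUG_ON((PAGE_SIZE << order) % unmap_q->map_size). A hedged sketch of that sizing logic; the 2048-byte slice for small buffers is an assumption, since the listing truncates the assignment at line 281, and get_order() here is a simplified stand-in for the kernel helper.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Smallest n such that (PAGE_SIZE << n) >= size. */
static unsigned int get_order(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int sizes[] = { 2048, 4096, 9000 }; /* hypothetical buffer sizes */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int buffer_size = sizes[i];
		unsigned int order = get_order(buffer_size);
		unsigned int alloc_order = order;
		unsigned int map_size;

		/* Small buffers share one allocation in fixed slices;
		 * larger ones take the whole allocation. (Assumed
		 * threshold; line 281 is truncated in the listing.) */
		map_size = (buffer_size > 2048) ? (PAGE_SIZE << order) : 2048;

		/* The BUG_ON() invariant from line 287. */
		assert((PAGE_SIZE << alloc_order) % map_size == 0);

		printf("buffer_size=%u -> alloc_order=%u map_size=%u (%u buffers/alloc)\n",
		       buffer_size, alloc_order, map_size,
		       (PAGE_SIZE << alloc_order) / map_size);
	}
	return 0;
}
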
325 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
329 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
331 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
343 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
353 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
357 unmap = &unmap_q->unmap[prod];
359 if (unmap_q->reuse_pi < 0) {
361 unmap_q->alloc_order);
364 prev = &unmap_q->unmap[unmap_q->reuse_pi];
366 page_offset = prev->page_offset + unmap_q->map_size;
377 unmap_q->map_size, DMA_FROM_DEVICE);
388 unmap->vector.len = unmap_q->map_size;
389 page_offset += unmap_q->map_size;
392 unmap_q->reuse_pi = prod;
394 unmap_q->reuse_pi = -1;
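
Lines 343-394 are the page-refill loop and explain what reuse_pi is for: when a previous slot only consumed part of its page allocation, reuse_pi points at that slot, and the next buffer is carved from the same page at prev->page_offset + map_size instead of allocating fresh. Once page_offset reaches alloc_size (PAGE_SIZE << alloc_order), reuse_pi goes back to -1 and the next iteration allocates a new page (lines 389-394). A user-space model of that slicing, with malloc() standing in for alloc_pages() and plain ownership replacing the kernel's get_page()/put_page() refcounting:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* One RX unmap slot: which page backs the buffer, and at what offset. */
struct rx_unmap {
	char *page;
	unsigned int page_offset;
};

int main(void)
{
	enum { Q_DEPTH = 8 };
	struct rx_unmap unmap[Q_DEPTH] = { { NULL, 0 } };
	unsigned int alloc_order = 1;                  /* hypothetical */
	unsigned int map_size = 2048;                  /* hypothetical */
	unsigned int alloc_size = PAGE_SIZE << alloc_order;
	int reuse_pi = -1;          /* -1: no partially used page left */
	unsigned int prod, page_offset;
	char *page;

	for (prod = 0; prod < Q_DEPTH; prod++) {
		if (reuse_pi < 0) {
			page = malloc(alloc_size);  /* alloc_pages() */
			page_offset = 0;
		} else {
			/* Carve the next map_size slice from the page the
			 * slot at reuse_pi used (lines 364-366). */
			page = unmap[reuse_pi].page;
			page_offset = unmap[reuse_pi].page_offset + map_size;
		}

		/* dma_map_page(page, page_offset, map_size, ...) here */
		unmap[prod].page = page;
		unmap[prod].page_offset = page_offset;

		/* Remember this slot while the page has room, otherwise
		 * force a fresh allocation next time (lines 389-394). */
		page_offset += map_size;
		reuse_pi = (page_offset < alloc_size) ? (int)prod : -1;

		printf("slot %u: page %p offset %u\n",
		       prod, (void *)unmap[prod].page, unmap[prod].page_offset);
	}

	/* Free each allocation once: the slot with page_offset == 0 owns
	 * it in this model (the kernel shares pages via refcounts). */
	for (prod = 0; prod < Q_DEPTH; prod++)
		if (unmap[prod].page && unmap[prod].page_offset == 0)
			free(unmap[prod].page);
	return 0;
}
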
417 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
429 unmap = &unmap_q->unmap[prod];
472 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
479 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
503 struct bnad_rx_unmap_q *unmap_q;
507 unmap_q = rcb->unmap_q;
509 unmap = &unmap_q->unmap[ci];
512 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
524 struct bnad_rx_unmap_q *unmap_q;
533 unmap_q = rcb->unmap_q;
538 prefetch(page_address(unmap_q->unmap[ci].page) +
539 unmap_q->unmap[ci].page_offset);
545 unmap = &unmap_q->unmap[ci];
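
Lines 524-545 walk the completion ring on the page-backed path, and lines 538-539 show a cache-warming detail: before the buffer at index ci is processed, its data (page_address(page) + page_offset) is prefetched. A small sketch of the same idea using __builtin_prefetch(), the GCC/Clang primitive that the kernel's prefetch() typically resolves to:

#include <stdio.h>
#include <stdlib.h>

/* Touch the next buffer's cache line before processing the current
 * one, so the data is (ideally) resident by the time it is read. */
static void process(const char *cur, const char *next)
{
	__builtin_prefetch(next);   /* a hint to the hardware only */
	printf("first byte: %d\n", cur[0]);
}

int main(void)
{
	char *page = calloc(1, 4096);

	process(page, page + 2048); /* 2048 = map_size, say */
	free(page);
	return 0;
}
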
592 struct bnad_rx_unmap_q *unmap_q;
625 unmap_q = rcb->unmap_q;
630 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
631 unmap = &unmap_q->unmap[sop_ci];
648 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
690 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
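
Lines 592-715 are the completion processing, and the repeated BNAD_RXBUF_IS_SK_BUFF(unmap_q->type) tests are the central dispatch: queues whose type says they were filled with whole sk_buffs hand the skb up directly, while page-backed queues (including BNAD_RXBUF_MULTI_BUFF, line 648) must attach each page slice as an skb fragment. A minimal sketch of that dispatch, assuming the BNAD_RXBUF_* values mirror the driver's enum:

#include <stdio.h>

/* Assumed to mirror enum bnad_rxbuf_type in the driver's header. */
enum rxbuf_type {
	BNAD_RXBUF_NONE,
	BNAD_RXBUF_SK_BUFF,
	BNAD_RXBUF_PAGE,
	BNAD_RXBUF_MULTI_BUFF,
};

/* BNAD_RXBUF_IS_SK_BUFF(): anything else is page-backed. */
static int rxbuf_is_sk_buff(enum rxbuf_type t)
{
	return t == BNAD_RXBUF_SK_BUFF;
}

static void process_completion(enum rxbuf_type t)
{
	if (rxbuf_is_sk_buff(t))
		puts("hand unmap->skb up the stack directly");
	else
		puts("attach unmap->page + page_offset as an skb fragment");
}

int main(void)
{
	process_completion(BNAD_RXBUF_SK_BUFF);
	process_completion(BNAD_RXBUF_MULTI_BUFF);
	return 0;
}
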
2919 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2960 unmap_q = tcb->unmap_q;
3002 head_unmap = &unmap_q[prod];
3035 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3050 unmap = &unmap_q[prod];
3057 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
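
Lines 2919-3074 are the transmit path, where the same bnad_tx_buff_unmap() helper doubles as the error unwind: head_unmap is reserved at the producer index (line 3002) before any fragment is mapped, and if a later mapping fails, the calls at lines 3035/3057/3074 release whatever was already recorded starting from tcb->producer_index. A hedged sketch of that unwind shape, with map_fragment() as a hypothetical stand-in for a dma_map_page() call that can fail:

#include <stdbool.h>
#include <stdio.h>

enum { MAX_FRAGS = 4 };

/* Model of the head unmap entry: which fragments hold live mappings. */
struct tx_unmap {
	bool mapped[MAX_FRAGS];
	int nvecs;
};

/* Hypothetical stand-in for dma_map_page(); fails at fragment fail_at. */
static bool map_fragment(int frag, int fail_at)
{
	return frag != fail_at;
}

/* Error unwind: drop every mapping recorded so far, mirroring the
 * bnad_tx_buff_unmap() calls on the failure paths. */
static void tx_buff_unmap(struct tx_unmap *unmap)
{
	int v;

	for (v = 0; v < unmap->nvecs; v++)
		unmap->mapped[v] = false;  /* dma_unmap_{single,page}() */
	unmap->nvecs = 0;
}

static bool start_xmit(struct tx_unmap *head_unmap, int nfrags, int fail_at)
{
	int frag;

	for (frag = 0; frag < nfrags; frag++) {
		if (!map_fragment(frag, fail_at)) {
			tx_buff_unmap(head_unmap);  /* undo partial work */
			return false;               /* and drop the skb */
		}
		head_unmap->mapped[frag] = true;
		head_unmap->nvecs++;
	}
	return true;
}

int main(void)
{
	struct tx_unmap head = { { false }, 0 };

	if (!start_xmit(&head, MAX_FRAGS, 2))
		printf("mapping failed; %d vectors left mapped\n", head.nvecs);
	return 0;
}
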