Lines Matching +full:hw +full:- +full:gro

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2021-2022, Intel Corporation.
23 #include <linux/dma-direction.h>
24 #include <linux/dma-mapping.h>
44 #include <net/gro.h>
75 value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer)); in t7xx_normal_pit_bid()
77 value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header)); in t7xx_normal_pit_bid()
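
The buffer ID is split across two PIT words: the high bits sit in the footer (PD_PIT_H_BID), the low bits in the header (PD_PIT_BUFFER_ID); the shift that joins them falls on an elided source line. A minimal model of the recombination, where the 13-bit low-field width is an assumption of this sketch, not taken from the match list:

	/* Sketch only: recombine a buffer ID split across two PIT words. */
	#define BID_LOW_WIDTH	13	/* assumed width of PD_PIT_BUFFER_ID */

	static unsigned int pit_bid_model(u32 footer, u32 header)
	{
		unsigned int bid = FIELD_GET(PD_PIT_H_BID, footer) << BID_LOW_WIDTH;

		return bid + FIELD_GET(PD_PIT_BUFFER_ID, header);
	}
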
84 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; in t7xx_dpmaif_update_bat_wr_idx()
85 struct dpmaif_bat_request *bat_req = rxq->bat_req; in t7xx_dpmaif_update_bat_wr_idx()
88 if (!rxq->que_started) { in t7xx_dpmaif_update_bat_wr_idx()
89 dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); in t7xx_dpmaif_update_bat_wr_idx()
90 return -EINVAL; in t7xx_dpmaif_update_bat_wr_idx()
93 old_rl_idx = bat_req->bat_release_rd_idx; in t7xx_dpmaif_update_bat_wr_idx()
94 old_wr_idx = bat_req->bat_wr_idx; in t7xx_dpmaif_update_bat_wr_idx()
100 if (new_wr_idx >= bat_req->bat_size_cnt) { in t7xx_dpmaif_update_bat_wr_idx()
101 new_wr_idx -= bat_req->bat_size_cnt; in t7xx_dpmaif_update_bat_wr_idx()
106 bat_req->bat_wr_idx = new_wr_idx; in t7xx_dpmaif_update_bat_wr_idx()
110 dev_err(dpmaif_ctrl->dev, "RX BAT flow check failed\n"); in t7xx_dpmaif_update_bat_wr_idx()
111 return -EINVAL; in t7xx_dpmaif_update_bat_wr_idx()
124 data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE); in t7xx_alloc_and_map_skb_info()
125 if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { in t7xx_alloc_and_map_skb_info()
126 dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); in t7xx_alloc_and_map_skb_info()
131 cur_skb->skb = skb; in t7xx_alloc_and_map_skb_info()
132 cur_skb->data_bus_addr = data_bus_addr; in t7xx_alloc_and_map_skb_info()
133 cur_skb->data_len = size; in t7xx_alloc_and_map_skb_info()
143 if (bat_skb->skb) { in t7xx_unmap_bat_skb()
144 dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); in t7xx_unmap_bat_skb()
145 dev_kfree_skb(bat_skb->skb); in t7xx_unmap_bat_skb()
146 bat_skb->skb = NULL; in t7xx_unmap_bat_skb()
151 * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
159 * If this is not the initial call, notify the HW about the new entries.
162 * * 0 - Success.
163 * * -ERROR - Error code.
173 if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) in t7xx_dpmaif_rx_buf_alloc()
174 return -EINVAL; in t7xx_dpmaif_rx_buf_alloc()
177 bat_max_cnt = bat_req->bat_size_cnt; in t7xx_dpmaif_rx_buf_alloc()
179 bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx, in t7xx_dpmaif_rx_buf_alloc()
180 bat_req->bat_wr_idx, DPMAIF_WRITE); in t7xx_dpmaif_rx_buf_alloc()
182 return -ENOMEM; in t7xx_dpmaif_rx_buf_alloc()
184 bat_start_idx = bat_req->bat_wr_idx; in t7xx_dpmaif_rx_buf_alloc()
192 cur_bat_idx -= bat_max_cnt; in t7xx_dpmaif_rx_buf_alloc()
194 cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx; in t7xx_dpmaif_rx_buf_alloc()
195 if (!cur_skb->skb && in t7xx_dpmaif_rx_buf_alloc()
196 !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb)) in t7xx_dpmaif_rx_buf_alloc()
199 cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; in t7xx_dpmaif_rx_buf_alloc()
200 cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr); in t7xx_dpmaif_rx_buf_alloc()
201 cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr); in t7xx_dpmaif_rx_buf_alloc()
205 return -ENOMEM; in t7xx_dpmaif_rx_buf_alloc()
214 ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i); in t7xx_dpmaif_rx_buf_alloc()
218 hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info, in t7xx_dpmaif_rx_buf_alloc()
220 if (hw_wr_idx != bat_req->bat_wr_idx) { in t7xx_dpmaif_rx_buf_alloc()
221 ret = -EFAULT; in t7xx_dpmaif_rx_buf_alloc()
222 dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n"); in t7xx_dpmaif_rx_buf_alloc()
230 while (i--) in t7xx_dpmaif_rx_buf_alloc()
231 t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); in t7xx_dpmaif_rx_buf_alloc()
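
Both BAT allocators size their batch with t7xx_ring_buf_rd_wr_count(), whose body is not part of this match list. A minimal model of the usual circular-ring accounting, assuming DPMAIF_WRITE mode keeps one slot unused so that a full ring stays distinguishable from an empty one:

	/* Sketch only: entries readable (READ) or slots writable (WRITE)
	 * between a read index and a write index on a ring of total_cnt.
	 */
	static unsigned int ring_rd_wr_count_model(unsigned int total_cnt,
						   unsigned int rd_idx,
						   unsigned int wr_idx, bool write)
	{
		int cnt = wr_idx - rd_idx;

		if (write)
			cnt--;			/* reserve one slot: full != empty */
		if (cnt < 0)
			cnt += total_cnt;	/* the write index has wrapped */

		return cnt;
	}
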
239 struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; in t7xx_dpmaifq_release_pit_entry()
243 if (!rxq->que_started) in t7xx_dpmaifq_release_pit_entry()
246 if (rel_entry_num >= rxq->pit_size_cnt) { in t7xx_dpmaifq_release_pit_entry()
247 dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n"); in t7xx_dpmaifq_release_pit_entry()
248 return -EINVAL; in t7xx_dpmaifq_release_pit_entry()
251 old_rel_idx = rxq->pit_release_rd_idx; in t7xx_dpmaifq_release_pit_entry()
253 hw_wr_idx = rxq->pit_wr_idx; in t7xx_dpmaifq_release_pit_entry()
254 if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt) in t7xx_dpmaifq_release_pit_entry()
255 new_rel_idx -= rxq->pit_size_cnt; in t7xx_dpmaifq_release_pit_entry()
257 ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num); in t7xx_dpmaifq_release_pit_entry()
259 dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret); in t7xx_dpmaifq_release_pit_entry()
263 rxq->pit_release_rd_idx = new_rel_idx; in t7xx_dpmaifq_release_pit_entry()
271 spin_lock_irqsave(&bat_req->mask_lock, flags); in t7xx_dpmaif_set_bat_mask()
272 set_bit(idx, bat_req->bat_bitmap); in t7xx_dpmaif_set_bat_mask()
273 spin_unlock_irqrestore(&bat_req->mask_lock, flags); in t7xx_dpmaif_set_bat_mask()
279 struct dpmaif_bat_request *bat_frag = rxq->bat_frag; in t7xx_frag_bat_cur_bid_check()
283 return -EINVAL; in t7xx_frag_bat_cur_bid_check()
285 bat_page = bat_frag->bat_skb + cur_bid; in t7xx_frag_bat_cur_bid_check()
286 if (!bat_page->page) in t7xx_frag_bat_cur_bid_check()
287 return -EINVAL; in t7xx_frag_bat_cur_bid_check()
297 if (bat_page->page) { in t7xx_unmap_bat_page()
298 dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE); in t7xx_unmap_bat_page()
299 put_page(bat_page->page); in t7xx_unmap_bat_page()
300 bat_page->page = NULL; in t7xx_unmap_bat_page()
305 * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
314 * If this is not the initial call, notify the HW about the new entries.
317 * * 0 - Success.
318 * * -ERROR - Error code.
323 unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx; in t7xx_dpmaif_rx_frag_alloc()
324 struct dpmaif_bat_page *bat_skb = bat_req->bat_skb; in t7xx_dpmaif_rx_frag_alloc()
327 if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) in t7xx_dpmaif_rx_frag_alloc()
328 return -EINVAL; in t7xx_dpmaif_rx_frag_alloc()
330 buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt, in t7xx_dpmaif_rx_frag_alloc()
331 bat_req->bat_release_rd_idx, bat_req->bat_wr_idx, in t7xx_dpmaif_rx_frag_alloc()
334 dev_err(dpmaif_ctrl->dev, in t7xx_dpmaif_rx_frag_alloc()
336 return -EINVAL; in t7xx_dpmaif_rx_frag_alloc()
344 if (!cur_page->page) { in t7xx_dpmaif_rx_frag_alloc()
349 data = netdev_alloc_frag(bat_req->pkt_buf_sz); in t7xx_dpmaif_rx_frag_alloc()
354 offset = data - page_address(page); in t7xx_dpmaif_rx_frag_alloc()
356 data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset, in t7xx_dpmaif_rx_frag_alloc()
357 bat_req->pkt_buf_sz, DMA_FROM_DEVICE); in t7xx_dpmaif_rx_frag_alloc()
358 if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) { in t7xx_dpmaif_rx_frag_alloc()
360 dev_err(dpmaif_ctrl->dev, "DMA mapping failed\n"); in t7xx_dpmaif_rx_frag_alloc()
364 cur_page->page = page; in t7xx_dpmaif_rx_frag_alloc()
365 cur_page->data_bus_addr = data_base_addr; in t7xx_dpmaif_rx_frag_alloc()
366 cur_page->offset = offset; in t7xx_dpmaif_rx_frag_alloc()
367 cur_page->data_len = bat_req->pkt_buf_sz; in t7xx_dpmaif_rx_frag_alloc()
370 data_base_addr = cur_page->data_bus_addr; in t7xx_dpmaif_rx_frag_alloc()
371 cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; in t7xx_dpmaif_rx_frag_alloc()
372 cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr); in t7xx_dpmaif_rx_frag_alloc()
373 cur_bat->p_buffer_addr = lower_32_bits(data_base_addr); in t7xx_dpmaif_rx_frag_alloc()
374 cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx); in t7xx_dpmaif_rx_frag_alloc()
377 bat_req->bat_wr_idx = cur_bat_idx; in t7xx_dpmaif_rx_frag_alloc()
380 t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i); in t7xx_dpmaif_rx_frag_alloc()
383 ret = -ENOMEM; in t7xx_dpmaif_rx_frag_alloc()
385 while (i--) in t7xx_dpmaif_rx_frag_alloc()
386 t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); in t7xx_dpmaif_rx_frag_alloc()
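
The fragment path above follows the standard page-fragment recipe, though the virt_to_head_page() step between netdev_alloc_frag() and the offset computation falls on an elided line. A hedged reconstruction of the whole recipe as one helper (the slot type reuses the dpmaif_bat_page fields shown above):

	/* Sketch only: allocate and DMA-map one Fragment BAT slot. */
	static int map_rx_frag_model(struct device *dev, unsigned int size,
				     struct dpmaif_bat_page *slot)
	{
		void *data = netdev_alloc_frag(size);
		struct page *page;

		if (!data)
			return -ENOMEM;

		page = virt_to_head_page(data);		/* backing page */
		slot->offset = data - page_address(page);
		slot->data_bus_addr = dma_map_page(dev, page, slot->offset,
						   size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, slot->data_bus_addr)) {
			put_page(page);
			return -ENOMEM;
		}

		slot->page = page;
		slot->data_len = size;
		return 0;
	}
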
398 struct device *dev = rxq->dpmaif_ctrl->dev; in t7xx_dpmaif_set_frag_to_skb()
403 page_info = rxq->bat_frag->bat_skb; in t7xx_dpmaif_set_frag_to_skb()
405 dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); in t7xx_dpmaif_set_frag_to_skb()
407 if (!page_info->page) in t7xx_dpmaif_set_frag_to_skb()
408 return -EINVAL; in t7xx_dpmaif_set_frag_to_skb()
410 data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); in t7xx_dpmaif_set_frag_to_skb()
411 data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); in t7xx_dpmaif_set_frag_to_skb()
412 data_base_addr = page_info->data_bus_addr; in t7xx_dpmaif_set_frag_to_skb()
413 data_offset = data_bus_addr - data_base_addr; in t7xx_dpmaif_set_frag_to_skb()
414 data_offset += page_info->offset; in t7xx_dpmaif_set_frag_to_skb()
415 data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); in t7xx_dpmaif_set_frag_to_skb()
416 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, in t7xx_dpmaif_set_frag_to_skb()
417 data_offset, data_len, page_info->data_len); in t7xx_dpmaif_set_frag_to_skb()
419 page_info->page = NULL; in t7xx_dpmaif_set_frag_to_skb()
420 page_info->offset = 0; in t7xx_dpmaif_set_frag_to_skb()
421 page_info->data_len = 0; in t7xx_dpmaif_set_frag_to_skb()
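
t7xx_dpmaif_set_frag_to_skb() recovers where the HW wrote inside the mapped page: the 64-bit address reported by the PIT, minus the base returned by dma_map_page(), plus the fragment's own offset within the page. A worked sketch of that arithmetic:

	/* Sketch only: byte offset of the received data inside the page.
	 * Example: mapped_base 0x80000000, frag_offset 0x800, PIT address
	 * 0x80000040 -> the data starts 0x840 bytes into the page.
	 */
	static unsigned int frag_data_offset_model(u32 pit_addr_h, u32 pit_addr_l,
						   dma_addr_t mapped_base,
						   unsigned int frag_offset)
	{
		u64 hw_addr = ((u64)pit_addr_h << 32) + pit_addr_l;

		return hw_addr - mapped_base + frag_offset;
	}
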
436 ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb); in t7xx_dpmaif_get_frag()
438 dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret); in t7xx_dpmaif_get_frag()
442 t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid); in t7xx_dpmaif_get_frag()
448 struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb; in t7xx_bat_cur_bid_check()
451 if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb) in t7xx_bat_cur_bid_check()
452 return -EINVAL; in t7xx_bat_cur_bid_check()
459 return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer)); in t7xx_dpmaif_read_pit_seq()
465 unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq; in t7xx_dpmaif_check_pit_seq()
470 return -EFAULT; in t7xx_dpmaif_check_pit_seq()
472 rxq->expect_pit_seq++; in t7xx_dpmaif_check_pit_seq()
473 if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE) in t7xx_dpmaif_check_pit_seq()
474 rxq->expect_pit_seq = 0; in t7xx_dpmaif_check_pit_seq()
484 spin_lock_irqsave(&bat_req->mask_lock, flags); in t7xx_dpmaif_avail_pkt_bat_cnt()
486 zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt, in t7xx_dpmaif_avail_pkt_bat_cnt()
487 bat_req->bat_release_rd_idx); in t7xx_dpmaif_avail_pkt_bat_cnt()
489 if (zero_index < bat_req->bat_size_cnt) { in t7xx_dpmaif_avail_pkt_bat_cnt()
490 spin_unlock_irqrestore(&bat_req->mask_lock, flags); in t7xx_dpmaif_avail_pkt_bat_cnt()
491 return zero_index - bat_req->bat_release_rd_idx; in t7xx_dpmaif_avail_pkt_bat_cnt()
495 zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx); in t7xx_dpmaif_avail_pkt_bat_cnt()
496 spin_unlock_irqrestore(&bat_req->mask_lock, flags); in t7xx_dpmaif_avail_pkt_bat_cnt()
497 return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index; in t7xx_dpmaif_avail_pkt_bat_cnt()
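
The two-phase bitmap scan counts how many consecutive BAT entries, starting at the release read index, have been consumed (bit set) and may be recycled, wrapping at most once around the ring. A worked example:

	/* Example: bat_size_cnt = 8, bat_release_rd_idx = 6, bits 6, 7 and 0
	 * set (consumed), bit 1 clear:
	 *
	 *   find_next_zero_bit(bitmap, 8, 6) -> 8 (no zero at or after 6),
	 *   so the scan wraps:
	 *   find_first_zero_bit(bitmap, 6)   -> 1,
	 *   count = 8 - 6 + 1 = 3 entries (indices 6, 7 and 0).
	 */
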
504 struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; in t7xx_dpmaif_release_bat_entry()
509 if (!rxq->que_started || !rel_entry_num) in t7xx_dpmaif_release_bat_entry()
510 return -EINVAL; in t7xx_dpmaif_release_bat_entry()
513 bat = rxq->bat_frag; in t7xx_dpmaif_release_bat_entry()
514 hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index); in t7xx_dpmaif_release_bat_entry()
516 bat = rxq->bat_req; in t7xx_dpmaif_release_bat_entry()
517 hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index); in t7xx_dpmaif_release_bat_entry()
520 if (rel_entry_num >= bat->bat_size_cnt) in t7xx_dpmaif_release_bat_entry()
521 return -EINVAL; in t7xx_dpmaif_release_bat_entry()
523 old_rel_idx = bat->bat_release_rd_idx; in t7xx_dpmaif_release_bat_entry()
527 if (bat->bat_wr_idx == old_rel_idx) in t7xx_dpmaif_release_bat_entry()
532 return -EINVAL; in t7xx_dpmaif_release_bat_entry()
535 if (new_rel_idx >= bat->bat_size_cnt) { in t7xx_dpmaif_release_bat_entry()
536 new_rel_idx -= bat->bat_size_cnt; in t7xx_dpmaif_release_bat_entry()
538 return -EINVAL; in t7xx_dpmaif_release_bat_entry()
541 spin_lock_irqsave(&bat->mask_lock, flags); in t7xx_dpmaif_release_bat_entry()
543 unsigned int index = bat->bat_release_rd_idx + i; in t7xx_dpmaif_release_bat_entry()
545 if (index >= bat->bat_size_cnt) in t7xx_dpmaif_release_bat_entry()
546 index -= bat->bat_size_cnt; in t7xx_dpmaif_release_bat_entry()
548 clear_bit(index, bat->bat_bitmap); in t7xx_dpmaif_release_bat_entry()
550 spin_unlock_irqrestore(&bat->mask_lock, flags); in t7xx_dpmaif_release_bat_entry()
552 bat->bat_release_rd_idx = new_rel_idx; in t7xx_dpmaif_release_bat_entry()
560 if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD) in t7xx_dpmaif_pit_release_and_add()
563 ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt); in t7xx_dpmaif_pit_release_and_add()
567 rxq->pit_remain_release_cnt = 0; in t7xx_dpmaif_pit_release_and_add()
576 bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req); in t7xx_dpmaif_bat_release_and_add()
582 dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret); in t7xx_dpmaif_bat_release_and_add()
586 ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false); in t7xx_dpmaif_bat_release_and_add()
588 dev_err(rxq->dpmaif_ctrl->dev, "Failed to allocate new RX buffers: %d\n", ret); in t7xx_dpmaif_bat_release_and_add()
598 bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag); in t7xx_dpmaif_frag_bat_release_and_add()
604 dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret); in t7xx_dpmaif_frag_bat_release_and_add()
608 return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false); in t7xx_dpmaif_frag_bat_release_and_add()
615 int header = le32_to_cpu(msg_pit->header); in t7xx_dpmaif_parse_msg_pit()
617 skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header); in t7xx_dpmaif_parse_msg_pit()
618 skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header); in t7xx_dpmaif_parse_msg_pit()
619 skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header); in t7xx_dpmaif_parse_msg_pit()
620 skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3)); in t7xx_dpmaif_parse_msg_pit()
628 struct device *dev = rxq->dpmaif_ctrl->dev; in t7xx_dpmaif_set_data_to_skb()
634 bat_skb = rxq->bat_req->bat_skb; in t7xx_dpmaif_set_data_to_skb()
636 dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); in t7xx_dpmaif_set_data_to_skb()
638 data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); in t7xx_dpmaif_set_data_to_skb()
639 data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); in t7xx_dpmaif_set_data_to_skb()
640 data_base_addr = bat_skb->data_bus_addr; in t7xx_dpmaif_set_data_to_skb()
641 data_offset = data_bus_addr - data_base_addr; in t7xx_dpmaif_set_data_to_skb()
642 data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); in t7xx_dpmaif_set_data_to_skb()
643 skb = bat_skb->skb; in t7xx_dpmaif_set_data_to_skb()
644 skb->len = 0; in t7xx_dpmaif_set_data_to_skb()
648 if (skb->tail + data_len > skb->end) { in t7xx_dpmaif_set_data_to_skb()
650 return -ENOBUFS; in t7xx_dpmaif_set_data_to_skb()
654 skb_info->cur_skb = skb; in t7xx_dpmaif_set_data_to_skb()
655 bat_skb->skb = NULL; in t7xx_dpmaif_set_data_to_skb()
672 dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret); in t7xx_dpmaif_get_rx_pkt()
676 t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid); in t7xx_dpmaif_get_rx_pkt()
682 struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; in t7xx_dpmaifq_rx_notify_hw()
685 queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work); in t7xx_dpmaifq_rx_notify_hw()
689 dev_err(dpmaif_ctrl->dev, "RXQ%u PIT update failed: %d\n", rxq->index, ret); in t7xx_dpmaifq_rx_notify_hw()
697 struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; in t7xx_dpmaif_rx_skb()
698 struct sk_buff *skb = skb_info->cur_skb; in t7xx_dpmaif_rx_skb()
702 skb_info->cur_skb = NULL; in t7xx_dpmaif_rx_skb()
704 if (skb_info->pit_dp) { in t7xx_dpmaif_rx_skb()
709 skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY : in t7xx_dpmaif_rx_skb()
711 netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx); in t7xx_dpmaif_rx_skb()
713 skb_cb->netif_idx = netif_id; in t7xx_dpmaif_rx_skb()
714 skb_cb->rx_pkt_type = skb_info->pkt_type; in t7xx_dpmaif_rx_skb()
715 dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi); in t7xx_dpmaif_rx_skb()
722 struct device *dev = rxq->dpmaif_ctrl->dev; in t7xx_dpmaif_rx_start()
726 pit_len = rxq->pit_size_cnt; in t7xx_dpmaif_rx_start()
727 skb_info = &rxq->rx_data_info; in t7xx_dpmaif_rx_start()
728 cur_pit = rxq->pit_rd_idx; in t7xx_dpmaif_rx_start()
734 if (!skb_info->msg_pit_received && recv_skb_cnt >= budget) in t7xx_dpmaif_rx_start()
737 pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; in t7xx_dpmaif_rx_start()
739 dev_err_ratelimited(dev, "RXQ%u PIT sequence check failed\n", rxq->index); in t7xx_dpmaif_rx_start()
744 val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); in t7xx_dpmaif_rx_start()
746 if (skb_info->msg_pit_received) in t7xx_dpmaif_rx_start()
747 dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index); in t7xx_dpmaif_rx_start()
749 skb_info->msg_pit_received = true; in t7xx_dpmaif_rx_start()
752 val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header)); in t7xx_dpmaif_rx_start()
755 else if (!skb_info->cur_skb) in t7xx_dpmaif_rx_start()
756 ret = -EINVAL; in t7xx_dpmaif_rx_start()
761 skb_info->err_payload = 1; in t7xx_dpmaif_rx_start()
762 dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index); in t7xx_dpmaif_rx_start()
765 val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header)); in t7xx_dpmaif_rx_start()
767 if (!skb_info->err_payload) { in t7xx_dpmaif_rx_start()
769 } else if (skb_info->cur_skb) { in t7xx_dpmaif_rx_start()
770 dev_kfree_skb_any(skb_info->cur_skb); in t7xx_dpmaif_rx_start()
771 skb_info->cur_skb = NULL; in t7xx_dpmaif_rx_start()
780 rxq->pit_rd_idx = cur_pit; in t7xx_dpmaif_rx_start()
781 rxq->pit_remain_release_cnt++; in t7xx_dpmaif_rx_start()
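
Only the matching lines of the PIT walk appear above; its overall shape is a small state machine, sketched here from those lines:

	/* Sketch of the PIT walk in t7xx_dpmaif_rx_start():
	 *
	 *   for each PIT entry, in expected sequence order:
	 *     message PIT -> parse channel, checksum, drop flag, packet type
	 *     payload PIT -> attach data (linear skb or page fragment)
	 *     if PD_PIT_CONT is clear -> packet complete, pass to rx_skb()
	 *   then advance pit_rd_idx and count the entry for deferred release
	 */
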
803 if (!rxq->que_started) in t7xx_dpmaifq_poll_pit()
806 hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index); in t7xx_dpmaifq_poll_pit()
807 pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx, in t7xx_dpmaifq_poll_pit()
809 rxq->pit_wr_idx = hw_wr_idx; in t7xx_dpmaifq_poll_pit()
817 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; in t7xx_dpmaif_napi_rx_data_collect()
827 dev_err(dpmaif_ctrl->dev, "DLQ%u RX error: %d\n", rxq->index, ret); in t7xx_dpmaif_napi_rx_data_collect()
835 struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev; in t7xx_dpmaif_napi_rx_poll()
838 atomic_set(&rxq->rx_processing, 1); in t7xx_dpmaif_napi_rx_poll()
842 if (!rxq->que_started) { in t7xx_dpmaif_napi_rx_poll()
843 atomic_set(&rxq->rx_processing, 0); in t7xx_dpmaif_napi_rx_poll()
844 pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); in t7xx_dpmaif_napi_rx_poll()
845 dev_err(rxq->dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); in t7xx_dpmaif_napi_rx_poll()
849 if (!rxq->sleep_lock_pending) in t7xx_dpmaif_napi_rx_poll()
852 ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire); in t7xx_dpmaif_napi_rx_poll()
855 rxq->sleep_lock_pending = true; in t7xx_dpmaif_napi_rx_poll()
860 rxq->sleep_lock_pending = false; in t7xx_dpmaif_napi_rx_poll()
862 int each_budget = budget - work_done; in t7xx_dpmaif_napi_rx_poll()
863 int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index, in t7xx_dpmaif_napi_rx_poll()
874 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); in t7xx_dpmaif_napi_rx_poll()
877 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); in t7xx_dpmaif_napi_rx_poll()
878 t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); in t7xx_dpmaif_napi_rx_poll()
879 t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); in t7xx_dpmaif_napi_rx_poll()
880 pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); in t7xx_dpmaif_napi_rx_poll()
881 pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); in t7xx_dpmaif_napi_rx_poll()
882 atomic_set(&rxq->rx_processing, 0); in t7xx_dpmaif_napi_rx_poll()
884 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); in t7xx_dpmaif_napi_rx_poll()
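
The poll routine follows the standard NAPI contract; a sketch of that contract with the driver-specific steps reduced to comments:

	/* Sketch only: the NAPI shape t7xx_dpmaif_napi_rx_poll() implements.
	 *
	 *   work_done = 0;
	 *   while (work_done < budget)
	 *       work_done += collect(budget - work_done);
	 *   if (work_done < budget && napi_complete_done(napi, work_done)) {
	 *       clear the IP busy status;
	 *       unmask the RX-done interrupt;   // re-arm for the next burst
	 *   }
	 *   return work_done;
	 */
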
896 qno = ffs(que_mask) - 1; in t7xx_dpmaif_irq_rx_done()
897 if (qno < 0 || qno >= DPMAIF_RXQ_NUM) { in t7xx_dpmaif_irq_rx_done()
898 dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno); in t7xx_dpmaif_irq_rx_done()
902 rxq = &dpmaif_ctrl->rxq[qno]; in t7xx_dpmaif_irq_rx_done()
903 ctrl = rxq->dpmaif_ctrl; in t7xx_dpmaif_irq_rx_done()
910 ret = pm_runtime_resume_and_get(ctrl->dev); in t7xx_dpmaif_irq_rx_done()
911 if (ret < 0 && ret != -EACCES) { in t7xx_dpmaif_irq_rx_done()
912 dev_err(ctrl->dev, "Failed to resume device: %d\n", ret); in t7xx_dpmaif_irq_rx_done()
915 napi_schedule(&rxq->napi); in t7xx_dpmaif_irq_rx_done()
921 if (bat_req->bat_base) in t7xx_dpmaif_base_free()
922 dma_free_coherent(dpmaif_ctrl->dev, in t7xx_dpmaif_base_free()
923 bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), in t7xx_dpmaif_base_free()
924 bat_req->bat_base, bat_req->bat_bus_addr); in t7xx_dpmaif_base_free()
928 * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
933 * This function allocates the BAT ring buffer shared with the HW device; it also allocates
937 * * 0 - Success.
938 * * -ERROR - Error code.
947 bat_req->bat_size_cnt = DPMAIF_FRG_COUNT; in t7xx_dpmaif_bat_alloc()
948 bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF; in t7xx_dpmaif_bat_alloc()
951 bat_req->bat_size_cnt = DPMAIF_BAT_COUNT; in t7xx_dpmaif_bat_alloc()
952 bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF; in t7xx_dpmaif_bat_alloc()
955 bat_req->type = buf_type; in t7xx_dpmaif_bat_alloc()
956 bat_req->bat_wr_idx = 0; in t7xx_dpmaif_bat_alloc()
957 bat_req->bat_release_rd_idx = 0; in t7xx_dpmaif_bat_alloc()
959 bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev, in t7xx_dpmaif_bat_alloc()
960 bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), in t7xx_dpmaif_bat_alloc()
961 &bat_req->bat_bus_addr, GFP_KERNEL); in t7xx_dpmaif_bat_alloc()
962 if (!bat_req->bat_base) in t7xx_dpmaif_bat_alloc()
963 return -ENOMEM; in t7xx_dpmaif_bat_alloc()
966 bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size, in t7xx_dpmaif_bat_alloc()
968 if (!bat_req->bat_skb) in t7xx_dpmaif_bat_alloc()
971 bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL); in t7xx_dpmaif_bat_alloc()
972 if (!bat_req->bat_bitmap) in t7xx_dpmaif_bat_alloc()
975 spin_lock_init(&bat_req->mask_lock); in t7xx_dpmaif_bat_alloc()
976 atomic_set(&bat_req->refcnt, 0); in t7xx_dpmaif_bat_alloc()
982 return -ENOMEM; in t7xx_dpmaif_bat_alloc()
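
A hypothetical call site pairing the two BAT types this function configures. BAT_TYPE_NORMAL is an assumed name, by analogy with BAT_TYPE_FRAG used in t7xx_dpmaif_bat_free() below; the error handling is illustrative:

	/* Sketch only: one ring for linear skbs, one for page fragments. */
	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req,
				    BAT_TYPE_NORMAL);
	if (ret)
		return ret;

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag,
				    BAT_TYPE_FRAG);
	if (ret) {
		t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
		return ret;
	}
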
987 if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt)) in t7xx_dpmaif_bat_free()
990 bitmap_free(bat_req->bat_bitmap); in t7xx_dpmaif_bat_free()
991 bat_req->bat_bitmap = NULL; in t7xx_dpmaif_bat_free()
993 if (bat_req->bat_skb) { in t7xx_dpmaif_bat_free()
996 for (i = 0; i < bat_req->bat_size_cnt; i++) { in t7xx_dpmaif_bat_free()
997 if (bat_req->type == BAT_TYPE_FRAG) in t7xx_dpmaif_bat_free()
998 t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); in t7xx_dpmaif_bat_free()
1000 t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); in t7xx_dpmaif_bat_free()
1009 rxq->pit_size_cnt = DPMAIF_PIT_COUNT; in t7xx_dpmaif_rx_alloc()
1010 rxq->pit_rd_idx = 0; in t7xx_dpmaif_rx_alloc()
1011 rxq->pit_wr_idx = 0; in t7xx_dpmaif_rx_alloc()
1012 rxq->pit_release_rd_idx = 0; in t7xx_dpmaif_rx_alloc()
1013 rxq->expect_pit_seq = 0; in t7xx_dpmaif_rx_alloc()
1014 rxq->pit_remain_release_cnt = 0; in t7xx_dpmaif_rx_alloc()
1015 memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); in t7xx_dpmaif_rx_alloc()
1017 rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev, in t7xx_dpmaif_rx_alloc()
1018 rxq->pit_size_cnt * sizeof(struct dpmaif_pit), in t7xx_dpmaif_rx_alloc()
1019 &rxq->pit_bus_addr, GFP_KERNEL); in t7xx_dpmaif_rx_alloc()
1020 if (!rxq->pit_base) in t7xx_dpmaif_rx_alloc()
1021 return -ENOMEM; in t7xx_dpmaif_rx_alloc()
1023 rxq->bat_req = &rxq->dpmaif_ctrl->bat_req; in t7xx_dpmaif_rx_alloc()
1024 atomic_inc(&rxq->bat_req->refcnt); in t7xx_dpmaif_rx_alloc()
1026 rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag; in t7xx_dpmaif_rx_alloc()
1027 atomic_inc(&rxq->bat_frag->refcnt); in t7xx_dpmaif_rx_alloc()
1033 if (!rxq->dpmaif_ctrl) in t7xx_dpmaif_rx_buf_free()
1036 t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); in t7xx_dpmaif_rx_buf_free()
1037 t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); in t7xx_dpmaif_rx_buf_free()
1039 if (rxq->pit_base) in t7xx_dpmaif_rx_buf_free()
1040 dma_free_coherent(rxq->dpmaif_ctrl->dev, in t7xx_dpmaif_rx_buf_free()
1041 rxq->pit_size_cnt * sizeof(struct dpmaif_pit), in t7xx_dpmaif_rx_buf_free()
1042 rxq->pit_base, rxq->pit_bus_addr); in t7xx_dpmaif_rx_buf_free()
1051 dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); in t7xx_dpmaif_rxq_init()
1067 ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); in t7xx_dpmaif_bat_release_work()
1068 if (ret < 0 && ret != -EACCES) in t7xx_dpmaif_bat_release_work()
1071 t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); in t7xx_dpmaif_bat_release_work()
1074 rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; in t7xx_dpmaif_bat_release_work()
1075 if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { in t7xx_dpmaif_bat_release_work()
1080 t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); in t7xx_dpmaif_bat_release_work()
1081 pm_runtime_mark_last_busy(dpmaif_ctrl->dev); in t7xx_dpmaif_bat_release_work()
1082 pm_runtime_put_autosuspend(dpmaif_ctrl->dev); in t7xx_dpmaif_bat_release_work()
1087 dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue", in t7xx_dpmaif_bat_rel_wq_alloc()
1089 if (!dpmaif_ctrl->bat_release_wq) in t7xx_dpmaif_bat_rel_wq_alloc()
1090 return -ENOMEM; in t7xx_dpmaif_bat_rel_wq_alloc()
1092 INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work); in t7xx_dpmaif_bat_rel_wq_alloc()
1098 flush_work(&dpmaif_ctrl->bat_release_work); in t7xx_dpmaif_bat_wq_rel()
1100 if (dpmaif_ctrl->bat_release_wq) { in t7xx_dpmaif_bat_wq_rel()
1101 destroy_workqueue(dpmaif_ctrl->bat_release_wq); in t7xx_dpmaif_bat_wq_rel()
1102 dpmaif_ctrl->bat_release_wq = NULL; in t7xx_dpmaif_bat_wq_rel()
1107 * t7xx_dpmaif_rx_stop() - Suspend RX flow.
1117 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; in t7xx_dpmaif_rx_stop()
1120 timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, in t7xx_dpmaif_rx_stop()
1123 dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n"); in t7xx_dpmaif_rx_stop()
1125 /* Ensure RX processing has stopped before we set rxq->que_started to false */ in t7xx_dpmaif_rx_stop()
1127 rxq->que_started = false; in t7xx_dpmaif_rx_stop()
1135 rxq->que_started = false; in t7xx_dpmaif_stop_rxq()
1138 cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, in t7xx_dpmaif_stop_rxq()
1139 rxq->pit_wr_idx, DPMAIF_READ); in t7xx_dpmaif_stop_rxq()
1142 dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed: %d PIT entries still pending\n", cnt); in t7xx_dpmaif_stop_rxq()
1147 memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit)); in t7xx_dpmaif_stop_rxq()
1148 memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat)); in t7xx_dpmaif_stop_rxq()
1149 bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt); in t7xx_dpmaif_stop_rxq()
1150 memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); in t7xx_dpmaif_stop_rxq()
1152 rxq->pit_rd_idx = 0; in t7xx_dpmaif_stop_rxq()
1153 rxq->pit_wr_idx = 0; in t7xx_dpmaif_stop_rxq()
1154 rxq->pit_release_rd_idx = 0; in t7xx_dpmaif_stop_rxq()
1155 rxq->expect_pit_seq = 0; in t7xx_dpmaif_stop_rxq()
1156 rxq->pit_remain_release_cnt = 0; in t7xx_dpmaif_stop_rxq()
1157 rxq->bat_req->bat_release_rd_idx = 0; in t7xx_dpmaif_stop_rxq()
1158 rxq->bat_req->bat_wr_idx = 0; in t7xx_dpmaif_stop_rxq()
1159 rxq->bat_frag->bat_release_rd_idx = 0; in t7xx_dpmaif_stop_rxq()
1160 rxq->bat_frag->bat_wr_idx = 0; in t7xx_dpmaif_stop_rxq()
1168 t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]); in t7xx_dpmaif_rx_clear()