Lines matching the full-word search terms "mbi", "-", and "ranges"

1 // SPDX-License-Identifier: GPL-2.0-only
42 if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) in airoha_qdma_set_irqmask()
45 spin_lock_irqsave(&qdma->irq_lock, flags); in airoha_qdma_set_irqmask()
47 qdma->irqmask[index] &= ~clear; in airoha_qdma_set_irqmask()
48 qdma->irqmask[index] |= set; in airoha_qdma_set_irqmask()
49 airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]); in airoha_qdma_set_irqmask()
55 spin_unlock_irqrestore(&qdma->irq_lock, flags); in airoha_qdma_set_irqmask()
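The airoha_qdma_set_irqmask() lines above update a cached copy of the interrupt-enable register under a spinlock: clear the requested bits, set the new ones, then write the cached word back to hardware. A minimal userspace sketch of that clear-then-set pattern, with a mutex standing in for spin_lock_irqsave() and the MMIO write stubbed out (all names below are illustrative, not from the driver):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irqmask;                /* cached copy of the enable register */

static void reg_write(uint32_t val)     /* stub for the MMIO write */
{
        printf("REG_INT_ENABLE <- 0x%08x\n", (unsigned)val);
}

static void set_irqmask(uint32_t clear, uint32_t set)
{
        pthread_mutex_lock(&irq_lock);
        irqmask &= ~clear;              /* drop the bits being disabled */
        irqmask |= set;                 /* add the bits being enabled */
        reg_write(irqmask);             /* push the cached mask to hw */
        pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
        set_irqmask(0, 0x3);            /* enable two sources */
        set_irqmask(0x1, 0);            /* later, disable one of them */
        return 0;
}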
76 return port->id == 1; in airhoa_is_lan_gdm_port()
81 struct airoha_eth *eth = port->qdma->eth; in airoha_set_macaddr()
110 struct airoha_eth *eth = port->qdma->eth; in airoha_set_vip_for_gdm_port()
113 switch (port->id) { in airoha_set_vip_for_gdm_port()
141 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) in airoha_fe_maccr_init()
206 /* ETH->ETH_P_1905 (0x893a) */ in airoha_fe_vip_setup()
260 all_rsv += (val - orig_val); in airoha_fe_set_pse_oq_rsv()
267 tmp = fq_limit - all_rsv - 0x20; in airoha_fe_set_pse_oq_rsv()
272 tmp = fq_limit - all_rsv - 0x100; in airoha_fe_set_pse_oq_rsv()
335 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) in airoha_fe_pse_ports_init()
484 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 in airoha_fe_init()
485 * connect other rings to PSE Port0 OQ-0 in airoha_fe_init()
502 /* NPU Core-3, NPU Bridge Channel-3 */ in airoha_fe_init()
507 /* QDMA LAN, RX Ring-22 */ in airoha_fe_init()
521 /* default aging mode for mbi unlock issue */ in airoha_fe_init()
538 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_fill_rx_queue()
539 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_fill_rx_queue()
540 struct airoha_eth *eth = qdma->eth; in airoha_qdma_fill_rx_queue()
541 int qid = q - &qdma->q_rx[0]; in airoha_qdma_fill_rx_queue()
544 while (q->queued < q->ndesc - 1) { in airoha_qdma_fill_rx_queue()
545 struct airoha_queue_entry *e = &q->entry[q->head]; in airoha_qdma_fill_rx_queue()
546 struct airoha_qdma_desc *desc = &q->desc[q->head]; in airoha_qdma_fill_rx_queue()
551 page = page_pool_dev_alloc_frag(q->page_pool, &offset, in airoha_qdma_fill_rx_queue()
552 q->buf_size); in airoha_qdma_fill_rx_queue()
556 q->head = (q->head + 1) % q->ndesc; in airoha_qdma_fill_rx_queue()
557 q->queued++; in airoha_qdma_fill_rx_queue()
560 e->buf = page_address(page) + offset; in airoha_qdma_fill_rx_queue()
561 e->dma_addr = page_pool_get_dma_addr(page) + offset; in airoha_qdma_fill_rx_queue()
562 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); in airoha_qdma_fill_rx_queue()
564 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_fill_rx_queue()
567 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); in airoha_qdma_fill_rx_queue()
568 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
569 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); in airoha_qdma_fill_rx_queue()
570 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); in airoha_qdma_fill_rx_queue()
571 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
572 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_fill_rx_queue()
573 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_fill_rx_queue()
574 WRITE_ONCE(desc->msg2, 0); in airoha_qdma_fill_rx_queue()
575 WRITE_ONCE(desc->msg3, 0); in airoha_qdma_fill_rx_queue()
579 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_fill_rx_queue()
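Note the refill loop's bound at line 544: it stops at q->ndesc - 1 buffers, leaving one descriptor slot permanently empty so that head == tail can only ever mean an empty ring, never a full one. A standalone sketch of that invariant (ring size and bookkeeping are made up for illustration):

#include <stdio.h>

#define NDESC 8

static int head, tail, queued;

/* Refill free slots, always leaving one descriptor unused so that
 * head == tail unambiguously means "empty" rather than "full".
 */
static int fill_queue(void)
{
        int nframes = 0;

        while (queued < NDESC - 1) {
                /* a real driver would allocate a buffer and write
                 * the DMA descriptor at 'head' here
                 */
                head = (head + 1) % NDESC;
                queued++;
                nframes++;
        }
        return nframes;
}

int main(void)
{
        printf("refilled %d slots\n", fill_queue());    /* 7, not 8 */
        return 0;
}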
588 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); in airoha_qdma_get_gdm_port()
596 port = sport - 1; in airoha_qdma_get_gdm_port()
599 return -EINVAL; in airoha_qdma_get_gdm_port()
602 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; in airoha_qdma_get_gdm_port()
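airoha_qdma_get_gdm_port() turns the descriptor's source-port field into an index into eth->ports[]: the shown cases reduce to port = sport - 1, and anything outside the table is rejected with -EINVAL. A compact sketch of that validation, with an invented sport encoding (the real switch has more cases than this listing shows):

#include <stdio.h>

#define NUM_PORTS 4
#define EINVAL_NEG (-22)

/* Map a 1-based hardware source-port id to a 0-based array index,
 * rejecting ids that fall outside the ports table.
 */
static int get_gdm_port(unsigned int sport)
{
        unsigned int port;

        if (!sport)
                return EINVAL_NEG;
        port = sport - 1;
        return port >= NUM_PORTS ? EINVAL_NEG : (int)port;
}

int main(void)
{
        printf("%d %d %d\n", get_gdm_port(1), get_gdm_port(5),
               get_gdm_port(0));        /* 0 -22 -22 */
        return 0;
}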
607 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_rx_process()
608 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_rx_process()
609 struct airoha_eth *eth = qdma->eth; in airoha_qdma_rx_process()
610 int qid = q - &qdma->q_rx[0]; in airoha_qdma_rx_process()
614 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_rx_process()
615 struct airoha_qdma_desc *desc = &q->desc[q->tail]; in airoha_qdma_rx_process()
616 u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); in airoha_qdma_rx_process()
617 struct page *page = virt_to_head_page(e->buf); in airoha_qdma_rx_process()
618 u32 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_rx_process()
625 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_rx_process()
626 q->queued--; in airoha_qdma_rx_process()
628 dma_sync_single_for_cpu(eth->dev, e->dma_addr, in airoha_qdma_rx_process()
629 SKB_WITH_OVERHEAD(q->buf_size), dir); in airoha_qdma_rx_process()
632 data_len = q->skb ? q->buf_size in airoha_qdma_rx_process()
633 : SKB_WITH_OVERHEAD(q->buf_size); in airoha_qdma_rx_process()
638 if (p < 0 || !eth->ports[p]) in airoha_qdma_rx_process()
641 port = eth->ports[p]; in airoha_qdma_rx_process()
642 if (!q->skb) { /* first buffer */ in airoha_qdma_rx_process()
643 q->skb = napi_build_skb(e->buf, q->buf_size); in airoha_qdma_rx_process()
644 if (!q->skb) in airoha_qdma_rx_process()
647 __skb_put(q->skb, len); in airoha_qdma_rx_process()
648 skb_mark_for_recycle(q->skb); in airoha_qdma_rx_process()
649 q->skb->dev = port->dev; in airoha_qdma_rx_process()
650 q->skb->protocol = eth_type_trans(q->skb, port->dev); in airoha_qdma_rx_process()
651 q->skb->ip_summed = CHECKSUM_UNNECESSARY; in airoha_qdma_rx_process()
652 skb_record_rx_queue(q->skb, qid); in airoha_qdma_rx_process()
654 struct skb_shared_info *shinfo = skb_shinfo(q->skb); in airoha_qdma_rx_process()
655 int nr_frags = shinfo->nr_frags; in airoha_qdma_rx_process()
657 if (nr_frags >= ARRAY_SIZE(shinfo->frags)) in airoha_qdma_rx_process()
660 skb_add_rx_frag(q->skb, nr_frags, page, in airoha_qdma_rx_process()
661 e->buf - page_address(page), len, in airoha_qdma_rx_process()
662 q->buf_size); in airoha_qdma_rx_process()
668 if (netdev_uses_dsa(port->dev)) { in airoha_qdma_rx_process()
675 le32_to_cpu(desc->msg0)); in airoha_qdma_rx_process()
677 if (sptag < ARRAY_SIZE(port->dsa_meta) && in airoha_qdma_rx_process()
678 port->dsa_meta[sptag]) in airoha_qdma_rx_process()
679 skb_dst_set_noref(q->skb, in airoha_qdma_rx_process()
680 &port->dsa_meta[sptag]->dst); in airoha_qdma_rx_process()
685 skb_set_hash(q->skb, jhash_1word(hash, 0), in airoha_qdma_rx_process()
690 airoha_ppe_check_skb(eth->ppe, hash); in airoha_qdma_rx_process()
693 napi_gro_receive(&q->napi, q->skb); in airoha_qdma_rx_process()
694 q->skb = NULL; in airoha_qdma_rx_process()
697 if (q->skb) { in airoha_qdma_rx_process()
698 dev_kfree_skb(q->skb); in airoha_qdma_rx_process()
699 q->skb = NULL; in airoha_qdma_rx_process()
701 page_pool_put_full_page(q->page_pool, page, true); in airoha_qdma_rx_process()
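airoha_qdma_rx_process() assembles one frame from possibly several ring buffers: the first buffer becomes the skb head via napi_build_skb(), later buffers are attached as page fragments with skb_add_rx_frag(), and the frame is dropped if it would exceed the fragment array. A simplified userspace model of that accumulation (the struct, the 'more' flag, and the sizes are illustrative):

#include <stdio.h>

#define MAX_FRAGS 4

struct frame {
        int nr_frags;           /* fragments attached after the head */
        int len;                /* total bytes accumulated */
        int in_progress;        /* a head buffer has been seen */
};

/* Feed one ring buffer into the frame being assembled; 'more' mirrors
 * the descriptor bit that says further buffers belong to this frame.
 */
static int rx_buffer(struct frame *f, int len, int more)
{
        if (!f->in_progress) {          /* first buffer: becomes the head */
                f->in_progress = 1;
                f->len = len;
        } else {                        /* follow-up buffer: page fragment */
                if (f->nr_frags >= MAX_FRAGS)
                        return -1;      /* oversized frame: drop it */
                f->nr_frags++;
                f->len += len;
        }
        if (!more) {                    /* frame complete: hand to the stack */
                printf("frame done: %d bytes, %d frags\n", f->len, f->nr_frags);
                *f = (struct frame){ 0 };
        }
        return 0;
}

int main(void)
{
        struct frame f = { 0 };

        rx_buffer(&f, 2048, 1);
        rx_buffer(&f, 500, 0);
        return 0;
}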
715 cur = airoha_qdma_rx_process(q, budget - done); in airoha_qdma_rx_napi_poll()
720 airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, in airoha_qdma_rx_napi_poll()
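The RX NAPI poll at line 715 repeatedly hands the remaining budget to airoha_qdma_rx_process() and re-enables the interrupt only once the ring drains before the budget is spent, which is the standard NAPI contract. A toy model of that budget loop (the backlog counter stands in for the hardware ring):

#include <stdio.h>

static int backlog = 25;                /* packets waiting in the ring */

static int process(int quota)           /* stand-in for rx_process() */
{
        int n = backlog < quota ? backlog : quota;

        backlog -= n;
        return n;
}

int main(void)
{
        int budget = 64, done = 0, cur;

        /* keep polling while progress is made and budget remains */
        do {
                cur = process(budget - done);
                done += cur;
        } while (cur && done < budget);

        if (done < budget)              /* ring drained: re-arm the irq */
                printf("irq re-enabled after %d packets\n", done);
        return 0;
}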
736 .dev = qdma->eth->dev, in airoha_qdma_init_rx_queue()
737 .napi = &q->napi, in airoha_qdma_init_rx_queue()
739 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_rx_queue()
740 int qid = q - &qdma->q_rx[0], thr; in airoha_qdma_init_rx_queue()
743 q->buf_size = PAGE_SIZE / 2; in airoha_qdma_init_rx_queue()
744 q->ndesc = ndesc; in airoha_qdma_init_rx_queue()
745 q->qdma = qdma; in airoha_qdma_init_rx_queue()
747 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_rx_queue()
749 if (!q->entry) in airoha_qdma_init_rx_queue()
750 return -ENOMEM; in airoha_qdma_init_rx_queue()
752 q->page_pool = page_pool_create(&pp_params); in airoha_qdma_init_rx_queue()
753 if (IS_ERR(q->page_pool)) { in airoha_qdma_init_rx_queue()
754 int err = PTR_ERR(q->page_pool); in airoha_qdma_init_rx_queue()
756 q->page_pool = NULL; in airoha_qdma_init_rx_queue()
760 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_rx_queue()
762 if (!q->desc) in airoha_qdma_init_rx_queue()
763 return -ENOMEM; in airoha_qdma_init_rx_queue()
765 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); in airoha_qdma_init_rx_queue()
776 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_rx_queue()
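airoha_qdma_init_rx_queue() sets buf_size to PAGE_SIZE / 2, so the page_pool fragment allocator hands out two RX buffers per page; the usable payload is SKB_WITH_OVERHEAD(buf_size) because room for struct skb_shared_info is reserved at the end of each buffer. A rough userspace calculation of that split, assuming a 4 KiB page and an invented shared-info size:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE        4096           /* assumed page size */
#define SMP_CACHE_BYTES  64             /* assumed cache-line size */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Stand-in for struct skb_shared_info; the real struct differs in size. */
struct fake_shared_info { char pad[320]; };

int main(void)
{
        size_t buf_size = PAGE_SIZE / 2;        /* two buffers per page */
        size_t overhead = ALIGN_UP(sizeof(struct fake_shared_info),
                                   SMP_CACHE_BYTES);

        /* mirrors SKB_WITH_OVERHEAD(): payload that fits before the
         * shared-info area placed at the end of the buffer
         */
        printf("buf %zu, payload %zu\n", buf_size, buf_size - overhead);
        return 0;
}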
786 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_rx_queue()
788 while (q->queued) { in airoha_qdma_cleanup_rx_queue()
789 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_rx_queue()
790 struct page *page = virt_to_head_page(e->buf); in airoha_qdma_cleanup_rx_queue()
792 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_rx_queue()
793 page_pool_get_dma_dir(q->page_pool)); in airoha_qdma_cleanup_rx_queue()
794 page_pool_put_full_page(q->page_pool, page, false); in airoha_qdma_cleanup_rx_queue()
795 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_rx_queue()
796 q->queued--; in airoha_qdma_cleanup_rx_queue()
804 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_init_rx()
808 /* rx-queue not bound to irq */ in airoha_qdma_init_rx()
812 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, in airoha_qdma_init_rx()
830 qdma = irq_q->qdma; in airoha_qdma_tx_napi_poll()
831 id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_napi_poll()
832 eth = qdma->eth; in airoha_qdma_tx_napi_poll()
836 head = head % irq_q->size; in airoha_qdma_tx_napi_poll()
840 u32 qid, val = irq_q->q[head]; in airoha_qdma_tx_napi_poll()
850 irq_q->q[head] = 0xff; /* mark as done */ in airoha_qdma_tx_napi_poll()
851 head = (head + 1) % irq_q->size; in airoha_qdma_tx_napi_poll()
852 irq_queued--; in airoha_qdma_tx_napi_poll()
856 if (qid >= ARRAY_SIZE(qdma->q_tx)) in airoha_qdma_tx_napi_poll()
859 q = &qdma->q_tx[qid]; in airoha_qdma_tx_napi_poll()
860 if (!q->ndesc) in airoha_qdma_tx_napi_poll()
864 if (index >= q->ndesc) in airoha_qdma_tx_napi_poll()
867 spin_lock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
869 if (!q->queued) in airoha_qdma_tx_napi_poll()
872 desc = &q->desc[index]; in airoha_qdma_tx_napi_poll()
873 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_tx_napi_poll()
879 e = &q->entry[index]; in airoha_qdma_tx_napi_poll()
880 skb = e->skb; in airoha_qdma_tx_napi_poll()
882 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_tx_napi_poll()
885 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_tx_napi_poll()
886 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_tx_napi_poll()
887 q->queued--; in airoha_qdma_tx_napi_poll()
889 /* completion ring can report out-of-order indexes if hw QoS in airoha_qdma_tx_napi_poll()
890  * is disabled and packets with different priority are queued in airoha_qdma_tx_napi_poll()
891  * to same DMA ring. Take into account possible out-of-order in airoha_qdma_tx_napi_poll()
892  * reports incrementing DMA ring tail pointer in airoha_qdma_tx_napi_poll()
893  */ in airoha_qdma_tx_napi_poll()
894 while (q->tail != q->head && !q->entry[q->tail].dma_addr) in airoha_qdma_tx_napi_poll()
895 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_tx_napi_poll()
901 txq = netdev_get_tx_queue(skb->dev, queue); in airoha_qdma_tx_napi_poll()
902 netdev_tx_completed_queue(txq, 1, skb->len); in airoha_qdma_tx_napi_poll()
904 q->ndesc - q->queued >= q->free_thr) in airoha_qdma_tx_napi_poll()
910 spin_unlock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
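Because completions may arrive out of order (see the comment at line 889), each entry is marked done individually by clearing its dma_addr, and the tail pointer is then advanced only across a contiguous run of completed slots. A standalone sketch of that reclaim step (field names and sizes are illustrative):

#include <stdio.h>

#define NDESC 8

struct entry { unsigned long dma_addr; };       /* 0 marks a reclaimed slot */

static struct entry ring[NDESC];
static int head = 5, tail;

/* Advance tail only over a contiguous run of completed entries, so
 * out-of-order completions never move tail past an in-flight slot.
 */
static void reclaim(void)
{
        while (tail != head && !ring[tail].dma_addr)
                tail = (tail + 1) % NDESC;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                ring[i].dma_addr = 0x1000 + i;  /* five entries in flight */

        ring[0].dma_addr = 0;   /* slots 0 and 2 complete out of order */
        ring[2].dma_addr = 0;
        reclaim();
        printf("tail=%d (stops at in-flight slot 1)\n", tail);
        return 0;
}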
933 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_tx_queue()
934 int i, qid = q - &qdma->q_tx[0]; in airoha_qdma_init_tx_queue()
937 spin_lock_init(&q->lock); in airoha_qdma_init_tx_queue()
938 q->ndesc = size; in airoha_qdma_init_tx_queue()
939 q->qdma = qdma; in airoha_qdma_init_tx_queue()
940 q->free_thr = 1 + MAX_SKB_FRAGS; in airoha_qdma_init_tx_queue()
942 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_tx_queue()
944 if (!q->entry) in airoha_qdma_init_tx_queue()
945 return -ENOMEM; in airoha_qdma_init_tx_queue()
947 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_tx_queue()
949 if (!q->desc) in airoha_qdma_init_tx_queue()
950 return -ENOMEM; in airoha_qdma_init_tx_queue()
952 for (i = 0; i < q->ndesc; i++) { in airoha_qdma_init_tx_queue()
956 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); in airoha_qdma_init_tx_queue()
965 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
967 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
975 int id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_irq_init()
976 struct airoha_eth *eth = qdma->eth; in airoha_qdma_tx_irq_init()
979 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, in airoha_qdma_tx_irq_init()
981 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), in airoha_qdma_tx_irq_init()
983 if (!irq_q->q) in airoha_qdma_tx_irq_init()
984 return -ENOMEM; in airoha_qdma_tx_irq_init()
986 memset(irq_q->q, 0xff, size * sizeof(u32)); in airoha_qdma_tx_irq_init()
987 irq_q->size = size; in airoha_qdma_tx_irq_init()
988 irq_q->qdma = qdma; in airoha_qdma_tx_irq_init()
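The completion ring is pre-filled with 0xff bytes at line 986, so an untouched u32 slot reads back as 0xffffffff; consumed slots are re-marked at line 850. The sketch below models that sentinel convention in a single pass; it is a simplification, and the driver's exact empty-slot test is not visible in this listing:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 8

static uint32_t ring[RING_SIZE];

int main(void)
{
        /* pre-fill with 0xff bytes, as the driver's memset() does: an
         * untouched u32 slot then reads back as 0xffffffff
         */
        memset(ring, 0xff, sizeof(ring));

        ring[0] = 3;                    /* hw posts a report for queue 3 */

        for (int head = 0; head < RING_SIZE; head++) {
                uint32_t val = ring[head];

                if (val == 0xffffffffu) /* untouched slot: stop */
                        break;
                printf("completion for queue %u\n", (unsigned)val);
                ring[head] = 0xff;      /* mark the slot consumed */
        }
        return 0;
}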
1003 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_qdma_init_tx()
1004 err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, in airoha_qdma_init_tx()
1010 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_init_tx()
1011 err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, in airoha_qdma_init_tx()
1022 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_tx_queue()
1024 spin_lock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1025 while (q->queued) { in airoha_qdma_cleanup_tx_queue()
1026 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_tx_queue()
1028 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_tx_queue()
1030 dev_kfree_skb_any(e->skb); in airoha_qdma_cleanup_tx_queue()
1031 e->skb = NULL; in airoha_qdma_cleanup_tx_queue()
1033 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_tx_queue()
1034 q->queued--; in airoha_qdma_cleanup_tx_queue()
1036 spin_unlock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1041 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_hfwd_queues()
1047 qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, in airoha_qdma_init_hfwd_queues()
1049 if (!qdma->hfwd.desc) in airoha_qdma_init_hfwd_queues()
1050 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1055 qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, in airoha_qdma_init_hfwd_queues()
1057 if (!qdma->hfwd.q) in airoha_qdma_init_hfwd_queues()
1058 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1100 /* fast-tick 25us */ in airoha_qdma_init_qos()
1134 /* Tx-cpu transferred count */ in airoha_qdma_init_qos_stats()
1140 /* Tx-fwd transferred count */ in airoha_qdma_init_qos_stats()
1155 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) in airoha_qdma_hw_init()
1164 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_hw_init()
1165 if (!qdma->q_tx[i].ndesc) in airoha_qdma_hw_init()
1189 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_hw_init()
1190 if (!qdma->q_rx[i].ndesc) in airoha_qdma_hw_init()
1207 u32 intr[ARRAY_SIZE(qdma->irqmask)]; in airoha_irq_handler()
1210 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) { in airoha_irq_handler()
1212 intr[i] &= qdma->irqmask[i]; in airoha_irq_handler()
1216 if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) in airoha_irq_handler()
1223 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_irq_handler()
1224 if (!qdma->q_rx[i].ndesc) in airoha_irq_handler()
1228 napi_schedule(&qdma->q_rx[i].napi); in airoha_irq_handler()
1233 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_irq_handler()
1239 napi_schedule(&qdma->q_tx_irq[i].napi); in airoha_irq_handler()
1250 int err, id = qdma - &eth->qdma[0]; in airoha_qdma_init()
1253 spin_lock_init(&qdma->irq_lock); in airoha_qdma_init()
1254 qdma->eth = eth; in airoha_qdma_init()
1256 res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); in airoha_qdma_init()
1258 return -ENOMEM; in airoha_qdma_init()
1260 qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); in airoha_qdma_init()
1261 if (IS_ERR(qdma->regs)) in airoha_qdma_init()
1262 return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), in airoha_qdma_init()
1265 qdma->irq = platform_get_irq(pdev, 4 * id); in airoha_qdma_init()
1266 if (qdma->irq < 0) in airoha_qdma_init()
1267 return qdma->irq; in airoha_qdma_init()
1269 err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler, in airoha_qdma_init()
1295 err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), in airoha_hw_init()
1296 eth->xsi_rsts); in airoha_hw_init()
1300 err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
1305 err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
1314 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { in airoha_hw_init()
1315 err = airoha_qdma_init(pdev, eth, &eth->qdma[i]); in airoha_hw_init()
1324 set_bit(DEV_STATE_INITIALIZED, &eth->state); in airoha_hw_init()
1333 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_hw_cleanup()
1334 if (!qdma->q_rx[i].ndesc) in airoha_hw_cleanup()
1337 netif_napi_del(&qdma->q_rx[i].napi); in airoha_hw_cleanup()
1338 airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); in airoha_hw_cleanup()
1339 if (qdma->q_rx[i].page_pool) in airoha_hw_cleanup()
1340 page_pool_destroy(qdma->q_rx[i].page_pool); in airoha_hw_cleanup()
1343 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_hw_cleanup()
1344 netif_napi_del(&qdma->q_tx_irq[i].napi); in airoha_hw_cleanup()
1346 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_hw_cleanup()
1347 if (!qdma->q_tx[i].ndesc) in airoha_hw_cleanup()
1350 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_hw_cleanup()
1358 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_qdma_start_napi()
1359 napi_enable(&qdma->q_tx_irq[i].napi); in airoha_qdma_start_napi()
1361 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_start_napi()
1362 if (!qdma->q_rx[i].ndesc) in airoha_qdma_start_napi()
1365 napi_enable(&qdma->q_rx[i].napi); in airoha_qdma_start_napi()
1373 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_qdma_stop_napi()
1374 napi_disable(&qdma->q_tx_irq[i].napi); in airoha_qdma_stop_napi()
1376 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_stop_napi()
1377 if (!qdma->q_rx[i].ndesc) in airoha_qdma_stop_napi()
1380 napi_disable(&qdma->q_rx[i].napi); in airoha_qdma_stop_napi()
1386 struct airoha_eth *eth = port->qdma->eth; in airoha_update_hw_stats()
1389 spin_lock(&port->stats.lock); in airoha_update_hw_stats()
1390 u64_stats_update_begin(&port->stats.syncp); in airoha_update_hw_stats()
1393 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
1394 port->stats.tx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
1395 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
1396 port->stats.tx_ok_pkts += val; in airoha_update_hw_stats()
1398 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
1399 port->stats.tx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
1400 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
1401 port->stats.tx_ok_bytes += val; in airoha_update_hw_stats()
1403 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
1404 port->stats.tx_drops += val; in airoha_update_hw_stats()
1406 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
1407 port->stats.tx_broadcast += val; in airoha_update_hw_stats()
1409 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
1410 port->stats.tx_multicast += val; in airoha_update_hw_stats()
1412 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
1413 port->stats.tx_len[i] += val; in airoha_update_hw_stats()
1415 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
1416 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1417 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
1418 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1420 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
1421 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1422 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
1423 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1425 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
1426 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1427 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
1428 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1430 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
1431 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1432 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
1433 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1435 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
1436 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1437 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
1438 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1440 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
1441 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1442 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
1443 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1445 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
1446 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1449 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
1450 port->stats.rx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
1451 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
1452 port->stats.rx_ok_pkts += val; in airoha_update_hw_stats()
1454 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
1455 port->stats.rx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
1456 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
1457 port->stats.rx_ok_bytes += val; in airoha_update_hw_stats()
1459 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
1460 port->stats.rx_drops += val; in airoha_update_hw_stats()
1462 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
1463 port->stats.rx_broadcast += val; in airoha_update_hw_stats()
1465 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
1466 port->stats.rx_multicast += val; in airoha_update_hw_stats()
1468 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); in airoha_update_hw_stats()
1469 port->stats.rx_errors += val; in airoha_update_hw_stats()
1471 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); in airoha_update_hw_stats()
1472 port->stats.rx_crc_error += val; in airoha_update_hw_stats()
1474 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); in airoha_update_hw_stats()
1475 port->stats.rx_over_errors += val; in airoha_update_hw_stats()
1477 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); in airoha_update_hw_stats()
1478 port->stats.rx_fragment += val; in airoha_update_hw_stats()
1480 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); in airoha_update_hw_stats()
1481 port->stats.rx_jabber += val; in airoha_update_hw_stats()
1484 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
1485 port->stats.rx_len[i] += val; in airoha_update_hw_stats()
1487 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
1488 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1489 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
1490 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1492 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
1493 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1494 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
1495 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1497 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
1498 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1499 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
1500 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1502 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
1503 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1504 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
1505 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1507 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
1508 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1509 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
1510 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1512 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
1513 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1514 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
1515 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1517 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
1518 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1521 airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), in airoha_update_hw_stats()
1524 u64_stats_update_end(&port->stats.syncp); in airoha_update_hw_stats()
1525 spin_unlock(&port->stats.lock); in airoha_update_hw_stats()
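Each hardware MIB counter is split across a _H/_L register pair; airoha_update_hw_stats() reads the high word, shifts it up 32 bits, adds the low word into the running u64, and finally resets the counters through REG_FE_GDM_MIB_CLEAR. A sketch of the hi/lo accumulation with the register reads stubbed out:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg_hi(void) { return 0x1; }       /* stubbed MMIO */
static uint32_t read_reg_lo(void) { return 0x2345; }

int main(void)
{
        uint64_t tx_ok_pkts = 0;

        /* high word first, then low word, mirroring the _H/_L reads */
        tx_ok_pkts += (uint64_t)read_reg_hi() << 32;
        tx_ok_pkts += read_reg_lo();

        printf("tx_ok_pkts=0x%llx\n", (unsigned long long)tx_ok_pkts);
        return 0;
}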
1530 int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN; in airoha_dev_open()
1532 struct airoha_qdma *qdma = port->qdma; in airoha_dev_open()
1540 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
1543 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
1546 airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id), in airoha_dev_open()
1554 atomic_inc(&qdma->users); in airoha_dev_open()
1562 struct airoha_qdma *qdma = port->qdma; in airoha_dev_stop()
1570 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) in airoha_dev_stop()
1573 if (atomic_dec_and_test(&qdma->users)) { in airoha_dev_stop()
1578 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_dev_stop()
1579 if (!qdma->q_tx[i].ndesc) in airoha_dev_stop()
1582 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_dev_stop()
1598 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_set_macaddr()
1605 u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; in airhoha_set_gdm2_loopback()
1606 struct airoha_eth *eth = port->qdma->eth; in airhoha_set_gdm2_loopback()
1607 u32 chan = port->id == 3 ? 4 : 0; in airhoha_set_gdm2_loopback()
1628 if (port->id == 3) { in airhoha_set_gdm2_loopback()
1657 struct airoha_eth *eth = port->qdma->eth; in airoha_dev_init()
1660 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_init()
1662 switch (port->id) { in airoha_dev_init()
1666 if (!eth->ports[1]) in airoha_dev_init()
1677 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); in airoha_dev_init()
1690 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_dev_get_stats64()
1691 storage->rx_packets = port->stats.rx_ok_pkts; in airoha_dev_get_stats64()
1692 storage->tx_packets = port->stats.tx_ok_pkts; in airoha_dev_get_stats64()
1693 storage->rx_bytes = port->stats.rx_ok_bytes; in airoha_dev_get_stats64()
1694 storage->tx_bytes = port->stats.tx_ok_bytes; in airoha_dev_get_stats64()
1695 storage->multicast = port->stats.rx_multicast; in airoha_dev_get_stats64()
1696 storage->rx_errors = port->stats.rx_errors; in airoha_dev_get_stats64()
1697 storage->rx_dropped = port->stats.rx_drops; in airoha_dev_get_stats64()
1698 storage->tx_dropped = port->stats.tx_drops; in airoha_dev_get_stats64()
1699 storage->rx_crc_errors = port->stats.rx_crc_error; in airoha_dev_get_stats64()
1700 storage->rx_over_errors = port->stats.rx_over_errors; in airoha_dev_get_stats64()
1701 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_dev_get_stats64()
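airoha_dev_get_stats64() copies the counters inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop, retrying the whole snapshot if the writer's sequence count moved, so 64-bit values are never read torn on 32-bit machines. A userspace analogue using a seqlock-style counter (simplified; single writer assumed):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;         /* even: stable, odd: write in progress */
static uint64_t rx_packets;

static void writer_update(uint64_t v)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd */
        rx_packets = v;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even */
}

static uint64_t reader_snapshot(void)
{
        unsigned int start;
        uint64_t v;

        do {
                start = atomic_load_explicit(&seq, memory_order_acquire);
                v = rx_packets;
        } while (atomic_load_explicit(&seq, memory_order_acquire) != start ||
                 (start & 1));  /* retry if torn or mid-write */
        return v;
}

int main(void)
{
        writer_update(42);
        printf("rx_packets=%llu\n", (unsigned long long)reader_snapshot());
        return 0;
}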
1707 struct airoha_eth *eth = port->qdma->eth; in airoha_dev_change_mtu()
1710 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id), in airoha_dev_change_mtu()
1713 WRITE_ONCE(dev->mtu, mtu); in airoha_dev_change_mtu()
1728 channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id; in airoha_dev_select_queue()
1730 queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */ in airoha_dev_select_queue()
1733 return queue < dev->num_tx_queues ? queue : 0; in airoha_dev_select_queue()
1746 if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) in airoha_get_dsa_tag()
1752 ehdr = (struct ethhdr *)skb->data; in airoha_get_dsa_tag()
1753 tag = be16_to_cpu(ehdr->h_proto); in airoha_get_dsa_tag()
1758 ehdr->h_proto = cpu_to_be16(ETH_P_8021Q); in airoha_get_dsa_tag()
1762 ehdr->h_proto = cpu_to_be16(ETH_P_8021AD); in airoha_get_dsa_tag()
1769 memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN); in airoha_get_dsa_tag()
1784 struct airoha_qdma *qdma = port->qdma; in airoha_dev_xmit()
1793 qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); in airoha_dev_xmit()
1801 if (skb->ip_summed == CHECKSUM_PARTIAL) in airoha_dev_xmit()
1811 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | in airoha_dev_xmit()
1813 __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size); in airoha_dev_xmit()
1815 tcp_hdr(skb)->check = (__force __sum16)csum; in airoha_dev_xmit()
1820 fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; in airoha_dev_xmit()
1824 q = &qdma->q_tx[qid]; in airoha_dev_xmit()
1825 if (WARN_ON_ONCE(!q->ndesc)) in airoha_dev_xmit()
1828 spin_lock_bh(&q->lock); in airoha_dev_xmit()
1831 nr_frags = 1 + skb_shinfo(skb)->nr_frags; in airoha_dev_xmit()
1833 if (q->queued + nr_frags > q->ndesc) { in airoha_dev_xmit()
1836 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
1841 data = skb->data; in airoha_dev_xmit()
1842 index = q->head; in airoha_dev_xmit()
1845 struct airoha_qdma_desc *desc = &q->desc[index]; in airoha_dev_xmit()
1846 struct airoha_queue_entry *e = &q->entry[index]; in airoha_dev_xmit()
1847 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in airoha_dev_xmit()
1851 addr = dma_map_single(dev->dev.parent, data, len, in airoha_dev_xmit()
1853 if (unlikely(dma_mapping_error(dev->dev.parent, addr))) in airoha_dev_xmit()
1856 index = (index + 1) % q->ndesc; in airoha_dev_xmit()
1859 if (i < nr_frags - 1) in airoha_dev_xmit()
1861 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_dev_xmit()
1862 WRITE_ONCE(desc->addr, cpu_to_le32(addr)); in airoha_dev_xmit()
1864 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_dev_xmit()
1865 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); in airoha_dev_xmit()
1866 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); in airoha_dev_xmit()
1867 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); in airoha_dev_xmit()
1869 e->skb = i ? NULL : skb; in airoha_dev_xmit()
1870 e->dma_addr = addr; in airoha_dev_xmit()
1871 e->dma_len = len; in airoha_dev_xmit()
1877 q->head = index; in airoha_dev_xmit()
1878 q->queued += i; in airoha_dev_xmit()
1881 netdev_tx_sent_queue(txq, skb->len); in airoha_dev_xmit()
1886 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_dev_xmit()
1888 if (q->ndesc - q->queued < q->free_thr) in airoha_dev_xmit()
1891 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
1896 for (i--; i >= 0; i--) { in airoha_dev_xmit()
1897 index = (q->head + i) % q->ndesc; in airoha_dev_xmit()
1898 dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, in airoha_dev_xmit()
1899 q->entry[index].dma_len, DMA_TO_DEVICE); in airoha_dev_xmit()
1902 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
1905 dev->stats.tx_dropped++; in airoha_dev_xmit()
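When dma_map_single() fails partway through a multi-fragment frame (line 1851 onward), the error path at line 1896 walks back over the i fragments already mapped and unmaps each one before dropping the packet, so no DMA mapping leaks. A generic sketch of that acquire-with-rollback loop (map/unmap are stand-ins):

#include <stdio.h>

#define NFRAGS 4

static int map_frag(int i)    { return i == 2 ? -1 : 0; } /* fail on frag 2 */
static void unmap_frag(int i) { printf("unmap frag %d\n", i); }

int main(void)
{
        int i;

        for (i = 0; i < NFRAGS; i++) {
                if (map_frag(i) < 0)
                        goto unwind;
        }
        printf("all frags mapped\n");
        return 0;

unwind:
        /* release only what was acquired, in reverse order */
        for (i--; i >= 0; i--)
                unmap_frag(i);
        printf("frame dropped\n");
        return 1;
}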
1914 struct airoha_eth *eth = port->qdma->eth; in airoha_ethtool_get_drvinfo()
1916 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); in airoha_ethtool_get_drvinfo()
1917 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); in airoha_ethtool_get_drvinfo()
1928 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_mac_stats()
1929 stats->MulticastFramesXmittedOK = port->stats.tx_multicast; in airoha_ethtool_get_mac_stats()
1930 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; in airoha_ethtool_get_mac_stats()
1931 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; in airoha_ethtool_get_mac_stats()
1932 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_mac_stats()
1949 const struct ethtool_rmon_hist_range **ranges) in airoha_ethtool_get_rmon_stats() argument
1952 struct airoha_hw_stats *hw_stats = &port->stats; in airoha_ethtool_get_rmon_stats()
1956 ARRAY_SIZE(hw_stats->tx_len) + 1); in airoha_ethtool_get_rmon_stats()
1958 ARRAY_SIZE(hw_stats->rx_len) + 1); in airoha_ethtool_get_rmon_stats()
1960 *ranges = airoha_ethtool_rmon_ranges; in airoha_ethtool_get_rmon_stats()
1965 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_rmon_stats()
1966 stats->fragments = hw_stats->rx_fragment; in airoha_ethtool_get_rmon_stats()
1967 stats->jabbers = hw_stats->rx_jabber; in airoha_ethtool_get_rmon_stats()
1968 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; in airoha_ethtool_get_rmon_stats()
1970 stats->hist[i] = hw_stats->rx_len[i]; in airoha_ethtool_get_rmon_stats()
1971 stats->hist_tx[i] = hw_stats->tx_len[i]; in airoha_ethtool_get_rmon_stats()
1973 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_rmon_stats()
1983 airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel), in airoha_qdma_set_chan_tx_sched()
1990 airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG, in airoha_qdma_set_chan_tx_sched()
1998 true, port->qdma, in airoha_qdma_set_chan_tx_sched()
2004 airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3), in airoha_qdma_set_chan_tx_sched()
2024 struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; in airoha_qdma_set_tx_ets_sched()
2029 if (p->bands > AIROHA_NUM_QOS_QUEUES) in airoha_qdma_set_tx_ets_sched()
2030 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2032 for (i = 0; i < p->bands; i++) { in airoha_qdma_set_tx_ets_sched()
2033 if (!p->quanta[i]) in airoha_qdma_set_tx_ets_sched()
2038 if (nstrict == AIROHA_NUM_QOS_QUEUES - 1) in airoha_qdma_set_tx_ets_sched()
2039 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2046 if (p->priomap[p->bands - i - 1] != i) in airoha_qdma_set_tx_ets_sched()
2047 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2050 for (i = 0; i < p->bands - nstrict; i++) { in airoha_qdma_set_tx_ets_sched()
2051 if (p->priomap[i] != nstrict + i) in airoha_qdma_set_tx_ets_sched()
2052 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2054 w[i] = p->weights[nstrict + i]; in airoha_qdma_set_tx_ets_sched()
2059 else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1) in airoha_qdma_set_tx_ets_sched()
2070 u64 cpu_tx_packets = airoha_qdma_rr(port->qdma, in airoha_qdma_get_tx_ets_stats()
2072 u64 fwd_tx_packets = airoha_qdma_rr(port->qdma, in airoha_qdma_get_tx_ets_stats()
2074 u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) + in airoha_qdma_get_tx_ets_stats()
2075 (fwd_tx_packets - port->fwd_tx_packets); in airoha_qdma_get_tx_ets_stats()
2076 _bstats_update(opt->stats.bstats, 0, tx_packets); in airoha_qdma_get_tx_ets_stats()
2078 port->cpu_tx_packets = cpu_tx_packets; in airoha_qdma_get_tx_ets_stats()
2079 port->fwd_tx_packets = fwd_tx_packets; in airoha_qdma_get_tx_ets_stats()
2089 if (opt->parent == TC_H_ROOT) in airoha_tc_setup_qdisc_ets()
2090 return -EINVAL; in airoha_tc_setup_qdisc_ets()
2092 channel = TC_H_MAJ(opt->handle) >> 16; in airoha_tc_setup_qdisc_ets()
2095 switch (opt->command) { in airoha_tc_setup_qdisc_ets()
2104 return -EOPNOTSUPP; in airoha_tc_setup_qdisc_ets()
2124 return -ETIMEDOUT; in airoha_qdma_get_trtcm_param()
2161 return -EINVAL; in airoha_qdma_set_trtcm_config()
2179 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2186 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2190 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2218 err = airoha_qdma_set_trtcm_config(port->qdma, channel, in airoha_qdma_set_tx_rate_limit()
2224 err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel, in airoha_qdma_set_tx_rate_limit()
2237 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_htb_alloc_leaf_queue()
2238 u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */ in airoha_tc_htb_alloc_leaf_queue()
2239 struct net_device *dev = port->dev; in airoha_tc_htb_alloc_leaf_queue()
2240 int num_tx_queues = dev->real_num_tx_queues; in airoha_tc_htb_alloc_leaf_queue()
2243 if (opt->parent_classid != TC_HTB_CLASSID_ROOT) { in airoha_tc_htb_alloc_leaf_queue()
2244 NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid"); in airoha_tc_htb_alloc_leaf_queue()
2245 return -EINVAL; in airoha_tc_htb_alloc_leaf_queue()
2248 err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum); in airoha_tc_htb_alloc_leaf_queue()
2250 NL_SET_ERR_MSG_MOD(opt->extack, in airoha_tc_htb_alloc_leaf_queue()
2255 if (opt->command == TC_HTB_NODE_MODIFY) in airoha_tc_htb_alloc_leaf_queue()
2260 airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum); in airoha_tc_htb_alloc_leaf_queue()
2261 NL_SET_ERR_MSG_MOD(opt->extack, in airoha_tc_htb_alloc_leaf_queue()
2266 set_bit(channel, port->qos_sq_bmap); in airoha_tc_htb_alloc_leaf_queue()
2267 opt->qid = AIROHA_NUM_TX_RING + channel; in airoha_tc_htb_alloc_leaf_queue()
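The HTB rate handed down by the stack is in bytes per second; line 2238 converts it to kbit/s for the shaper, dividing by 1000 and multiplying by 8 via the << 3. A quick standalone check of that conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t rate_Bps = 125000000;          /* 1 Gbit/s in bytes/s */
        uint32_t rate_kbps = (uint32_t)(rate_Bps / 1000) << 3;

        printf("%u kbit/s\n", rate_kbps);       /* 1000000 */
        return 0;
}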
2279 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in airoha_dev_setup_tc_block()
2280 return -EOPNOTSUPP; in airoha_dev_setup_tc_block()
2282 f->driver_block_list = &block_cb_list; in airoha_dev_setup_tc_block()
2283 switch (f->command) { in airoha_dev_setup_tc_block()
2285 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); in airoha_dev_setup_tc_block()
2290 block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL); in airoha_dev_setup_tc_block()
2296 list_add_tail(&block_cb->driver_list, &block_cb_list); in airoha_dev_setup_tc_block()
2299 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); in airoha_dev_setup_tc_block()
2301 return -ENOENT; in airoha_dev_setup_tc_block()
2305 list_del(&block_cb->driver_list); in airoha_dev_setup_tc_block()
2309 return -EOPNOTSUPP; in airoha_dev_setup_tc_block()
2315 struct net_device *dev = port->dev; in airoha_tc_remove_htb_queue()
2317 netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1); in airoha_tc_remove_htb_queue()
2319 clear_bit(queue, port->qos_sq_bmap); in airoha_tc_remove_htb_queue()
2325 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_htb_delete_leaf_queue()
2327 if (!test_bit(channel, port->qos_sq_bmap)) { in airoha_tc_htb_delete_leaf_queue()
2328 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); in airoha_tc_htb_delete_leaf_queue()
2329 return -EINVAL; in airoha_tc_htb_delete_leaf_queue()
2341 for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS) in airoha_tc_htb_destroy()
2350 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_get_htb_get_leaf_queue()
2352 if (!test_bit(channel, port->qos_sq_bmap)) { in airoha_tc_get_htb_get_leaf_queue()
2353 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); in airoha_tc_get_htb_get_leaf_queue()
2354 return -EINVAL; in airoha_tc_get_htb_get_leaf_queue()
2357 opt->qid = AIROHA_NUM_TX_RING + channel; in airoha_tc_get_htb_get_leaf_queue()
2365 switch (opt->command) { in airoha_tc_setup_qdisc_htb()
2380 return -EOPNOTSUPP; in airoha_tc_setup_qdisc_htb()
2400 return -EOPNOTSUPP; in airoha_dev_tc_setup()
2426 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { in airoha_metadata_dst_alloc()
2432 return -ENOMEM; in airoha_metadata_dst_alloc()
2434 md_dst->u.port_info.port_id = i; in airoha_metadata_dst_alloc()
2435 port->dsa_meta[i] = md_dst; in airoha_metadata_dst_alloc()
2445 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { in airoha_metadata_dst_free()
2446 if (!port->dsa_meta[i]) in airoha_metadata_dst_free()
2449 metadata_dst_free(port->dsa_meta[i]); in airoha_metadata_dst_free()
2458 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_is_valid_gdm_port()
2459 if (eth->ports[i] == port) in airoha_is_valid_gdm_port()
2477 dev_err(eth->dev, "missing gdm port id\n"); in airoha_alloc_gdm_port()
2478 return -EINVAL; in airoha_alloc_gdm_port()
2482 p = id - 1; in airoha_alloc_gdm_port()
2484 if (!id || id > ARRAY_SIZE(eth->ports)) { in airoha_alloc_gdm_port()
2485 dev_err(eth->dev, "invalid gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2486 return -EINVAL; in airoha_alloc_gdm_port()
2489 if (eth->ports[p]) { in airoha_alloc_gdm_port()
2490 dev_err(eth->dev, "duplicate gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2491 return -EINVAL; in airoha_alloc_gdm_port()
2494 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), in airoha_alloc_gdm_port()
2498 dev_err(eth->dev, "alloc_etherdev failed\n"); in airoha_alloc_gdm_port()
2499 return -ENOMEM; in airoha_alloc_gdm_port()
2502 qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA]; in airoha_alloc_gdm_port()
2503 dev->netdev_ops = &airoha_netdev_ops; in airoha_alloc_gdm_port()
2504 dev->ethtool_ops = &airoha_ethtool_ops; in airoha_alloc_gdm_port()
2505 dev->max_mtu = AIROHA_MAX_MTU; in airoha_alloc_gdm_port()
2506 dev->watchdog_timeo = 5 * HZ; in airoha_alloc_gdm_port()
2507 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | in airoha_alloc_gdm_port()
2511 dev->features |= dev->hw_features; in airoha_alloc_gdm_port()
2512 dev->vlan_features = dev->hw_features; in airoha_alloc_gdm_port()
2513 dev->dev.of_node = np; in airoha_alloc_gdm_port()
2514 dev->irq = qdma->irq; in airoha_alloc_gdm_port()
2515 SET_NETDEV_DEV(dev, eth->dev); in airoha_alloc_gdm_port()
2524 if (err == -EPROBE_DEFER) in airoha_alloc_gdm_port()
2528 dev_info(eth->dev, "generated random MAC address %pM\n", in airoha_alloc_gdm_port()
2529 dev->dev_addr); in airoha_alloc_gdm_port()
2533 u64_stats_init(&port->stats.syncp); in airoha_alloc_gdm_port()
2534 spin_lock_init(&port->stats.lock); in airoha_alloc_gdm_port()
2535 port->qdma = qdma; in airoha_alloc_gdm_port()
2536 port->dev = dev; in airoha_alloc_gdm_port()
2537 port->id = id; in airoha_alloc_gdm_port()
2538 eth->ports[p] = port; in airoha_alloc_gdm_port()
2553 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in airoha_probe()
2555 return -ENOMEM; in airoha_probe()
2557 eth->dev = &pdev->dev; in airoha_probe()
2559 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); in airoha_probe()
2561 dev_err(eth->dev, "failed configuring DMA mask\n"); in airoha_probe()
2565 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); in airoha_probe()
2566 if (IS_ERR(eth->fe_regs)) in airoha_probe()
2567 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), in airoha_probe()
2570 eth->rsts[0].id = "fe"; in airoha_probe()
2571 eth->rsts[1].id = "pdma"; in airoha_probe()
2572 eth->rsts[2].id = "qdma"; in airoha_probe()
2573 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2574 ARRAY_SIZE(eth->rsts), in airoha_probe()
2575 eth->rsts); in airoha_probe()
2577 dev_err(eth->dev, "failed to get bulk reset lines\n"); in airoha_probe()
2581 eth->xsi_rsts[0].id = "xsi-mac"; in airoha_probe()
2582 eth->xsi_rsts[1].id = "hsi0-mac"; in airoha_probe()
2583 eth->xsi_rsts[2].id = "hsi1-mac"; in airoha_probe()
2584 eth->xsi_rsts[3].id = "hsi-mac"; in airoha_probe()
2585 eth->xsi_rsts[4].id = "xfp-mac"; in airoha_probe()
2586 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2587 ARRAY_SIZE(eth->xsi_rsts), in airoha_probe()
2588 eth->xsi_rsts); in airoha_probe()
2590 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); in airoha_probe()
2594 eth->napi_dev = alloc_netdev_dummy(0); in airoha_probe()
2595 if (!eth->napi_dev) in airoha_probe()
2596 return -ENOMEM; in airoha_probe()
2599 eth->napi_dev->threaded = true; in airoha_probe()
2600 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); in airoha_probe()
2607 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2608 airoha_qdma_start_napi(&eth->qdma[i]); in airoha_probe()
2611 for_each_child_of_node(pdev->dev.of_node, np) { in airoha_probe()
2612 if (!of_device_is_compatible(np, "airoha,eth-mac")) in airoha_probe()
2628 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2629 airoha_qdma_stop_napi(&eth->qdma[i]); in airoha_probe()
2631 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2632 airoha_hw_cleanup(&eth->qdma[i]); in airoha_probe()
2634 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_probe()
2635 struct airoha_gdm_port *port = eth->ports[i]; in airoha_probe()
2637 if (port && port->dev->reg_state == NETREG_REGISTERED) { in airoha_probe()
2638 unregister_netdev(port->dev); in airoha_probe()
2642 free_netdev(eth->napi_dev); in airoha_probe()
2653 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { in airoha_remove()
2654 airoha_qdma_stop_napi(&eth->qdma[i]); in airoha_remove()
2655 airoha_hw_cleanup(&eth->qdma[i]); in airoha_remove()
2658 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_remove()
2659 struct airoha_gdm_port *port = eth->ports[i]; in airoha_remove()
2664 airoha_dev_stop(port->dev); in airoha_remove()
2665 unregister_netdev(port->dev); in airoha_remove()
2668 free_netdev(eth->napi_dev); in airoha_remove()
2675 { .compatible = "airoha,en7581-eth" },