[openwrt/openwrt] mediatek: fix ethernet rx hang issue on MT7981/MT7986

LEDE Commits lede-commits at lists.infradead.org
Tue Jan 16 10:44:31 PST 2024


nbd pushed a commit to openwrt/openwrt.git, branch openwrt-23.05:
https://git.openwrt.org/2dfd1453da8ebdb07f008095e1329d95c2d31922

commit 2dfd1453da8ebdb07f008095e1329d95c2d31922
Author: Felix Fietkau <nbd at nbd.name>
AuthorDate: Tue Jan 16 19:29:52 2024 +0100

    mediatek: fix ethernet rx hang issue on MT7981/MT7986
    
    Add patches by Lorenzo/Daniel to use QDMA instead of ADMAv2
    
    Signed-off-by: Felix Fietkau <nbd at nbd.name>
    (cherry picked from commit ede34465de2176229590028fba622d54841ab40c)
---
 ...t-mediatek-split-tx-and-rx-fields-in-mtk_.patch | 599 +++++++++++++++++++++
 ...t-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch | 123 +++++
 2 files changed, 722 insertions(+)
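
The first patch is mechanical: it splits the shared txrx block of struct
mtk_soc_data into independent tx and rx blocks, so the second patch can then
give RX a descriptor format different from TX on the same SoC. Below is a
simplified, standalone C sketch of the layout change; the field names are
taken from the patch itself, but the two struct wrappers are illustrative
only, not the driver's actual definitions:

    #include <stdint.h>

    /* Before: one block shared by both DMA directions. */
    struct mtk_soc_data_old {
        struct {
            uint32_t txd_size;          /* Tx DMA descriptor size */
            uint32_t rxd_size;          /* Rx DMA descriptor size */
            uint32_t rx_irq_done_mask;  /* Rx irq done register mask */
            uint32_t rx_dma_l4_valid;   /* Rx DMA valid register mask */
            uint32_t dma_max_len;       /* shared max DMA buffer length */
            uint32_t dma_len_offset;    /* shared length-field offset */
        } txrx;
    };

    /* After: each direction carries its own parameters, so rx can later
     * fall back to v1 descriptors while tx stays on v2. */
    struct mtk_soc_data_new {
        struct {
            uint32_t desc_size;
            uint32_t dma_max_len;
            uint32_t dma_len_offset;
        } tx;
        struct {
            uint32_t desc_size;
            uint32_t irq_done_mask;
            uint32_t dma_l4_valid;
            uint32_t dma_max_len;
            uint32_t dma_len_offset;
        } rx;
    };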

diff --git a/target/linux/mediatek/patches-5.15/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch b/target/linux/mediatek/patches-5.15/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
new file mode 100644
index 0000000000..7b78033ffb
--- /dev/null
+++ b/target/linux/mediatek/patches-5.15/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
@@ -0,0 +1,599 @@
+From: Lorenzo Bianconi <lorenzo at kernel.org>
+Date: Thu, 2 Nov 2023 16:47:07 +0100
+Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
+ in mtk_soc_data struct
+
+Split tx and rx fields in mtk_soc_data struct. This is a preliminary
+patch to roll back to QDMA for MT7986 SoC in order to fix a hw hang
+if the device receives a corrupted packet.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo at kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1238,7 +1238,7 @@ static int mtk_init_fq_dma(struct mtk_et
+ 		eth->scratch_ring = eth->sram_base;
+ 	else
+ 		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+-						       cnt * soc->txrx.txd_size,
++						       cnt * soc->tx.desc_size,
+ 						       &eth->phy_scratch_ring,
+ 						       GFP_KERNEL);
+ 	if (unlikely(!eth->scratch_ring))
+@@ -1254,16 +1254,16 @@ static int mtk_init_fq_dma(struct mtk_et
+ 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ 		return -ENOMEM;
+ 
+-	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		struct mtk_tx_dma_v2 *txd;
+ 
+-		txd = eth->scratch_ring + i * soc->txrx.txd_size;
++		txd = eth->scratch_ring + i * soc->tx.desc_size;
+ 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ 		if (i < cnt - 1)
+ 			txd->txd2 = eth->phy_scratch_ring +
+-				    (i + 1) * soc->txrx.txd_size;
++				    (i + 1) * soc->tx.desc_size;
+ 
+ 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ 		txd->txd4 = 0;
+@@ -1510,7 +1510,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ 	if (itxd == ring->last_free)
+ 		return -ENOMEM;
+ 
+-	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ 	memset(itx_buf, 0, sizeof(*itx_buf));
+ 
+ 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1551,7 +1551,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ 
+ 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ 			txd_info.size = min_t(unsigned int, frag_size,
+-					      soc->txrx.dma_max_len);
++					      soc->tx.dma_max_len);
+ 			txd_info.qid = queue;
+ 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ 					!(frag_size - txd_info.size);
+@@ -1564,7 +1564,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
+ 
+ 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-						    soc->txrx.txd_size);
++						    soc->tx.desc_size);
+ 			if (new_desc)
+ 				memset(tx_buf, 0, sizeof(*tx_buf));
+ 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1607,7 +1607,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ 	} else {
+ 		int next_idx;
+ 
+-		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+ 					 ring->dma_size);
+ 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ 	}
+@@ -1616,7 +1616,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ 
+ err_dma:
+ 	do {
+-		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ 
+ 		/* unmap dma */
+ 		mtk_tx_unmap(eth, tx_buf, false);
+@@ -1641,7 +1641,7 @@ static int mtk_cal_txd_req(struct mtk_et
+ 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 			frag = &skb_shinfo(skb)->frags[i];
+ 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+-					       eth->soc->txrx.dma_max_len);
++					       eth->soc->tx.dma_max_len);
+ 		}
+ 	} else {
+ 		nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1782,7 +1782,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+ 
+ 		ring = &eth->rx_ring[i];
+ 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ 		if (rxd->rxd2 & RX_DMA_DONE) {
+ 			ring->calc_idx_update = true;
+ 			return ring;
+@@ -1950,7 +1950,7 @@ static int mtk_xdp_submit_frame(struct m
+ 	}
+ 	htxd = txd;
+ 
+-	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+ 	memset(tx_buf, 0, sizeof(*tx_buf));
+ 	htx_buf = tx_buf;
+ 
+@@ -1970,7 +1970,7 @@ static int mtk_xdp_submit_frame(struct m
+ 				goto unmap;
+ 
+ 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-						    soc->txrx.txd_size);
++						    soc->tx.desc_size);
+ 			memset(tx_buf, 0, sizeof(*tx_buf));
+ 			n_desc++;
+ 		}
+@@ -2007,7 +2007,7 @@ static int mtk_xdp_submit_frame(struct m
+ 	} else {
+ 		int idx;
+ 
+-		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+ 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ 			MT7628_TX_CTX_IDX0);
+ 	}
+@@ -2019,7 +2019,7 @@ static int mtk_xdp_submit_frame(struct m
+ unmap:
+ 	while (htxd != txd) {
+ 		txd_pdma = qdma_to_pdma(ring, htxd);
+-		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+ 		mtk_tx_unmap(eth, tx_buf, false);
+ 
+ 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2147,7 +2147,7 @@ static int mtk_poll_rx(struct napi_struc
+ 			goto rx_done;
+ 
+ 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ 		data = ring->data[idx];
+ 
+ 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2282,7 +2282,7 @@ static int mtk_poll_rx(struct napi_struc
+ 			rxdcsum = &trxd.rxd4;
+ 		}
+ 
+-		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+ 			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 		else
+ 			skb_checksum_none_assert(skb);
+@@ -2403,7 +2403,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ 			break;
+ 
+ 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
+-					    eth->soc->txrx.txd_size);
++					    eth->soc->tx.desc_size);
+ 		if (!tx_buf->data)
+ 			break;
+ 
+@@ -2451,7 +2451,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ 		}
+ 		mtk_tx_unmap(eth, tx_buf, true);
+ 
+-		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++		desc = ring->dma + cpu * eth->soc->tx.desc_size;
+ 		ring->last_free = desc;
+ 		atomic_inc(&ring->free_count);
+ 
+@@ -2540,7 +2540,7 @@ static int mtk_napi_rx(struct napi_struc
+ 	do {
+ 		int rx_done;
+ 
+-		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++		mtk_w32(eth, eth->soc->rx.irq_done_mask,
+ 			reg_map->pdma.irq_status);
+ 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ 		rx_done_total += rx_done;
+@@ -2556,10 +2556,10 @@ static int mtk_napi_rx(struct napi_struc
+ 			return budget;
+ 
+ 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
+-		 eth->soc->txrx.rx_irq_done_mask);
++		 eth->soc->rx.irq_done_mask);
+ 
+ 	if (napi_complete_done(napi, rx_done_total))
+-		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ 
+ 	return rx_done_total;
+ }
+@@ -2568,7 +2568,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+ 	const struct mtk_soc_data *soc = eth->soc;
+ 	struct mtk_tx_ring *ring = &eth->tx_ring;
+-	int i, sz = soc->txrx.txd_size;
++	int i, sz = soc->tx.desc_size;
+ 	struct mtk_tx_dma_v2 *txd;
+ 	int ring_size;
+ 	u32 ofs, val;
+@@ -2691,14 +2691,14 @@ static void mtk_tx_clean(struct mtk_eth
+ 	}
+ 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * soc->txrx.txd_size,
++				  ring->dma_size * soc->tx.desc_size,
+ 				  ring->dma, ring->phys);
+ 		ring->dma = NULL;
+ 	}
+ 
+ 	if (ring->dma_pdma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * soc->txrx.txd_size,
++				  ring->dma_size * soc->tx.desc_size,
+ 				  ring->dma_pdma, ring->phys_pdma);
+ 		ring->dma_pdma = NULL;
+ 	}
+@@ -2753,15 +2753,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+ 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ 	    rx_flag != MTK_RX_FLAGS_NORMAL) {
+ 		ring->dma = dma_alloc_coherent(eth->dma_dev,
+-					       rx_dma_size * eth->soc->txrx.rxd_size,
+-					       &ring->phys, GFP_KERNEL);
++				rx_dma_size * eth->soc->rx.desc_size,
++				&ring->phys, GFP_KERNEL);
+ 	} else {
+ 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+ 
+ 		ring->dma = tx_ring->dma + tx_ring_size *
+-			    eth->soc->txrx.txd_size * (ring_no + 1);
++			    eth->soc->tx.desc_size * (ring_no + 1);
+ 		ring->phys = tx_ring->phys + tx_ring_size *
+-			     eth->soc->txrx.txd_size * (ring_no + 1);
++			     eth->soc->tx.desc_size * (ring_no + 1);
+ 	}
+ 
+ 	if (!ring->dma)
+@@ -2772,7 +2772,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ 		dma_addr_t dma_addr;
+ 		void *data;
+ 
+-		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + i * eth->soc->rx.desc_size;
+ 		if (ring->page_pool) {
+ 			data = mtk_page_pool_get_buff(ring->page_pool,
+ 						      &dma_addr, GFP_KERNEL);
+@@ -2861,7 +2861,7 @@ static void mtk_rx_clean(struct mtk_eth
+ 			if (!ring->data[i])
+ 				continue;
+ 
+-			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++			rxd = ring->dma + i * eth->soc->rx.desc_size;
+ 			if (!rxd->rxd1)
+ 				continue;
+ 
+@@ -2878,7 +2878,7 @@ static void mtk_rx_clean(struct mtk_eth
+ 
+ 	if (!in_sram && ring->dma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * eth->soc->txrx.rxd_size,
++				  ring->dma_size * eth->soc->rx.desc_size,
+ 				  ring->dma, ring->phys);
+ 		ring->dma = NULL;
+ 	}
+@@ -3241,7 +3241,7 @@ static void mtk_dma_free(struct mtk_eth
+ 			netdev_reset_queue(eth->netdev[i]);
+ 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++				  MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+ 				  eth->scratch_ring, eth->phy_scratch_ring);
+ 		eth->scratch_ring = NULL;
+ 		eth->phy_scratch_ring = 0;
+@@ -3291,7 +3291,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+ 
+ 	eth->rx_events++;
+ 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 		__napi_schedule(&eth->rx_napi);
+ 	}
+ 
+@@ -3317,9 +3317,9 @@ static irqreturn_t mtk_handle_irq(int ir
+ 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ 
+ 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+-	    eth->soc->txrx.rx_irq_done_mask) {
++	    eth->soc->rx.irq_done_mask) {
+ 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
+-		    eth->soc->txrx.rx_irq_done_mask)
++		    eth->soc->rx.irq_done_mask)
+ 			mtk_handle_irq_rx(irq, _eth);
+ 	}
+ 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3337,10 +3337,10 @@ static void mtk_poll_controller(struct n
+ 	struct mtk_eth *eth = mac->hw;
+ 
+ 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 	mtk_handle_irq_rx(eth->irq[2], dev);
+ 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+ 
+@@ -3505,7 +3505,7 @@ static int mtk_open(struct net_device *d
+ 		napi_enable(&eth->tx_napi);
+ 		napi_enable(&eth->rx_napi);
+ 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+ 		refcount_set(&eth->dma_refcnt, 1);
+ 	}
+ 	else
+@@ -3588,7 +3588,7 @@ static int mtk_stop(struct net_device *d
+ 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ 
+ 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 	napi_disable(&eth->tx_napi);
+ 	napi_disable(&eth->rx_napi);
+ 
+@@ -4064,9 +4064,9 @@ static int mtk_hw_init(struct mtk_eth *e
+ 
+ 	/* FE int grouping */
+ 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+ 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+ 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ 
+ 	if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5163,11 +5163,15 @@ static const struct mtk_soc_data mt2701_
+ 	.required_clks = MT7623_CLKS_BITMAP,
+ 	.required_pctl = true,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5183,11 +5187,15 @@ static const struct mtk_soc_data mt7621_
+ 	.offload_version = 1,
+ 	.hash_offset = 2,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5205,11 +5213,15 @@ static const struct mtk_soc_data mt7622_
+ 	.hash_offset = 2,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5226,11 +5238,15 @@ static const struct mtk_soc_data mt7623_
+ 	.hash_offset = 2,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ 	.disable_pll_modes = true,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5245,11 +5261,15 @@ static const struct mtk_soc_data mt7629_
+ 	.required_pctl = false,
+ 	.has_accounting = true,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5267,11 +5287,15 @@ static const struct mtk_soc_data mt7981_
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++		.dma_len_offset = 8,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma_v2),
++		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
+@@ -5289,11 +5313,15 @@ static const struct mtk_soc_data mt7986_
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++		.dma_len_offset = 8,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma_v2),
++		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
+@@ -5311,11 +5339,15 @@ static const struct mtk_soc_data mt7988_
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++		.dma_len_offset = 8,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma_v2),
++		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
+@@ -5328,11 +5360,15 @@ static const struct mtk_soc_data rt5350_
+ 	.required_clks = MT7628_CLKS_BITMAP,
+ 	.required_pctl = false,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -326,8 +326,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU	BIT(31)
+ #define TX_DMA_LS0		BIT(30)
+-#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x)		(((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x)		((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC		BIT(14)
+ #define TX_DMA_PQID		GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK	GENMASK(3, 0)
+@@ -347,8 +347,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE		BIT(31)
+ #define RX_DMA_LSO		BIT(30)
+-#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG		BIT(15)
+ #define RX_DMA_ADDR64_MASK	GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
+  * @foe_entry_size		Foe table entry size.
+  * @has_accounting		Bool indicating support for accounting of
+  *				offloaded flows.
+- * @txd_size			Tx DMA descriptor size.
+- * @rxd_size			Rx DMA descriptor size.
+- * @rx_irq_done_mask		Rx irq done register mask.
+- * @rx_dma_l4_valid		Rx DMA valid register mask.
++ * @desc_size			Tx/Rx DMA descriptor size.
++ * @irq_done_mask		Rx irq done register mask.
++ * @dma_l4_valid		Rx DMA valid register mask.
+  * @dma_max_len			Max DMA tx/rx buffer length.
+  * @dma_len_offset		Tx/Rx DMA length field offset.
+  */
+@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
+ 	bool		has_accounting;
+ 	bool		disable_pll_modes;
+ 	struct {
+-		u32	txd_size;
+-		u32	rxd_size;
+-		u32	rx_irq_done_mask;
+-		u32	rx_dma_l4_valid;
++		u32	desc_size;
+ 		u32	dma_max_len;
+ 		u32	dma_len_offset;
+-	} txrx;
++	} tx;
++	struct {
++		u32	desc_size;
++		u32	irq_done_mask;
++		u32	dma_l4_valid;
++		u32	dma_max_len;
++		u32	dma_len_offset;
++	} rx;
+ };
+ 
+ #define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)
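
Throughout the patch above, descriptor addresses are computed by byte
arithmetic on a per-SoC descriptor size, which is why every user of
soc->txrx.txd_size / soc->txrx.rxd_size becomes soc->tx.desc_size /
soc->rx.desc_size. A minimal sketch of that indexing pattern (an
illustrative helper, not a function from the driver):

    #include <stdint.h>
    #include <stddef.h>

    /* Rings are flat buffers of fixed-size descriptors; the element size
     * differs between v1 (mtk_rx_dma) and v2 (mtk_rx_dma_v2) hardware, so
     * it has to come from the per-direction soc data at runtime. */
    static inline void *desc_at(void *ring_base, int idx, uint32_t desc_size)
    {
        return (uint8_t *)ring_base + (size_t)idx * desc_size;
    }

    /* e.g. rxd = desc_at(ring->dma, idx, eth->soc->rx.desc_size); */
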
diff --git a/target/linux/mediatek/patches-5.15/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch b/target/linux/mediatek/patches-5.15/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
new file mode 100644
index 0000000000..df8e988eb5
--- /dev/null
+++ b/target/linux/mediatek/patches-5.15/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
@@ -0,0 +1,123 @@
+From: Daniel Golle <daniel at makrotopia.org>
+Date: Tue, 10 Oct 2023 21:06:43 +0200
+Subject: [PATCH net-next 2/2] net: ethernet: mediatek: use QDMA instead of
+ ADMAv2 on MT7981 and MT7986
+
+ADMA is plagued by RX hangs which can't easily be detected and which
+happen upon receipt of a corrupted packet.
+Use QDMA just like on netsys v1; it is still present and usable, and
+doesn't suffer from that problem.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo at kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo at kernel.org>
+Signed-off-by: Daniel Golle <daniel at makrotopia.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
+ 	.tx_irq_mask		= 0x461c,
+ 	.tx_irq_status		= 0x4618,
+ 	.pdma = {
+-		.rx_ptr		= 0x6100,
+-		.rx_cnt_cfg	= 0x6104,
+-		.pcrx_ptr	= 0x6108,
+-		.glo_cfg	= 0x6204,
+-		.rst_idx	= 0x6208,
+-		.delay_irq	= 0x620c,
+-		.irq_status	= 0x6220,
+-		.irq_mask	= 0x6228,
+-		.adma_rx_dbg0	= 0x6238,
+-		.int_grp	= 0x6250,
++		.rx_ptr		= 0x4100,
++		.rx_cnt_cfg	= 0x4104,
++		.pcrx_ptr	= 0x4108,
++		.glo_cfg	= 0x4204,
++		.rst_idx	= 0x4208,
++		.delay_irq	= 0x420c,
++		.irq_status	= 0x4220,
++		.irq_mask	= 0x4228,
++		.adma_rx_dbg0	= 0x4238,
++		.int_grp	= 0x4250,
+ 	},
+ 	.qdma = {
+ 		.qtx_cfg	= 0x4400,
+@@ -1206,7 +1206,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+ 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+-	if (mtk_is_netsys_v2_or_greater(eth)) {
++	if (mtk_is_netsys_v3_or_greater(eth)) {
+ 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ 	}
+@@ -2154,7 +2154,7 @@ static int mtk_poll_rx(struct napi_struc
+ 			break;
+ 
+ 		/* find out which mac the packet come from. values start at 1 */
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+ 
+ 			switch (val) {
+@@ -2266,7 +2266,7 @@ static int mtk_poll_rx(struct napi_struc
+ 		skb->dev = netdev;
+ 		bytes += skb->len;
+ 
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ 			if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2807,7 +2807,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ 
+ 		rxd->rxd3 = 0;
+ 		rxd->rxd4 = 0;
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			rxd->rxd5 = 0;
+ 			rxd->rxd6 = 0;
+ 			rxd->rxd7 = 0;
+@@ -4010,7 +4010,7 @@ static int mtk_hw_init(struct mtk_eth *e
+ 	else
+ 		mtk_hw_reset(eth);
+ 
+-	if (mtk_is_netsys_v2_or_greater(eth)) {
++	if (mtk_is_netsys_v3_or_greater(eth)) {
+ 		/* Set FE to PDMAv2 if necessary */
+ 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5293,11 +5293,11 @@ static const struct mtk_soc_data mt7981_
+ 		.dma_len_offset = 8,
+ 	},
+ 	.rx = {
+-		.desc_size = sizeof(struct mtk_rx_dma_v2),
+-		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+-		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-		.dma_len_offset = 8,
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
+ 	},
+ };
+ 
+@@ -5319,11 +5319,11 @@ static const struct mtk_soc_data mt7986_
+ 		.dma_len_offset = 8,
+ 	},
+ 	.rx = {
+-		.desc_size = sizeof(struct mtk_rx_dma_v2),
+-		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+-		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-		.dma_len_offset = 8,
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
+ 	},
+ };
+ 