[openwrt/openwrt] mvebu: next backport mvnet MQPrio offload

LEDE Commits lede-commits at lists.infradead.org
Fri Dec 3 15:37:55 PST 2021


chunkeey pushed a commit to openwrt/openwrt.git, branch master:
https://git.openwrt.org/7fd1ca96a13112a7ea214b3baf076cd81d712378

commit 7fd1ca96a13112a7ea214b3baf076cd81d712378
Author: Kabuli Chana <newtownBuild at gmail.com>
AuthorDate: Thu Dec 2 15:03:51 2021 -0700

    mvebu: next backport mvnet MQPrio offload
    
    linux-next MQPrio patches adding TC traffic shaping offload
    
    Signed-off-by: Kabuli Chana <newtownBuild at gmail.com>
---
 ...next-ethernet-marvell-mvnetaMQPrioOffload.patch |  71 ++++++++
 ...et-next-ethernet-marvell-mvnetaMQPrioFlag.patch |  35 ++++
 ...t-next-ethernet-marvell-mvnetaMQPrioQueue.patch | 102 +++++++++++
 ...xt-ethernet-marvell-mvnetaMQPrioTCOffload.patch | 187 +++++++++++++++++++++
 4 files changed, 395 insertions(+)
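
For context: all four patches funnel into the driver through the
ndo_setup_tc() hook. A minimal sketch of that entry point, paraphrased
from 5.10-era mvneta.c (the function predates this series; check the
tree for the authoritative version):

    static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
                               void *type_data)
    {
            switch (type) {
            case TC_SETUP_QDISC_MQPRIO:
                    /* type_data carries the mqprio configuration from the
                     * TC core; patch 702 changes its type from
                     * struct tc_mqprio_qopt to struct tc_mqprio_qopt_offload.
                     */
                    return mvneta_setup_mqprio(dev, type_data);
            default:
                    return -EOPNOTSUPP;
            }
    }

From userspace the offload is exercised with iproute2's mqprio qdisc,
e.g. "tc qdisc replace dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1
queues 1@0 1@1 hw 1".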

diff --git a/target/linux/mvebu/patches-5.10/702-net-next-ethernet-marvell-mvnetaMQPrioOffload.patch b/target/linux/mvebu/patches-5.10/702-net-next-ethernet-marvell-mvnetaMQPrioOffload.patch
new file mode 100644
index 0000000000..8a5f3f3cf4
--- /dev/null
+++ b/target/linux/mvebu/patches-5.10/702-net-next-ethernet-marvell-mvnetaMQPrioOffload.patch
@@ -0,0 +1,71 @@
+From 75fa71e3acadbb4ab5eda18505277eb9a1f69b23 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Date: Fri, 26 Nov 2021 12:20:53 +0100
+Subject: net: mvneta: Use struct tc_mqprio_qopt_offload for MQPrio
+ configuration
+
+The struct tc_mqprio_qopt_offload is a container for struct tc_mqprio_qopt
+that allows passing extra parameters, such as traffic shaping. This commit
+converts the current mqprio code to that new struct.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+(limited to 'drivers/net/ethernet/marvell/mvneta.c')
+
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 80e4b500695e6..46b7604805f76 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -38,6 +38,7 @@
+ #include <net/ipv6.h>
+ #include <net/tso.h>
+ #include <net/page_pool.h>
++#include <net/pkt_cls.h>
+ #include <linux/bpf_trace.h>
+ 
+ /* Registers */
+@@ -4908,14 +4909,14 @@ static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+ }
+ 
+ static int mvneta_setup_mqprio(struct net_device *dev,
+-			       struct tc_mqprio_qopt *qopt)
++			       struct tc_mqprio_qopt_offload *mqprio)
+ {
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 	u8 num_tc;
+ 	int i;
+ 
+-	qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+-	num_tc = qopt->num_tc;
++	mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
++	num_tc = mqprio->qopt.num_tc;
+ 
+ 	if (num_tc > rxq_number)
+ 		return -EINVAL;
+@@ -4926,13 +4927,15 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 		return 0;
+ 	}
+ 
+-	memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
++	memcpy(pp->prio_tc_map, mqprio->qopt.prio_tc_map,
++	       sizeof(pp->prio_tc_map));
+ 
+ 	mvneta_setup_rx_prio_map(pp);
+ 
+-	netdev_set_num_tc(dev, qopt->num_tc);
+-	for (i = 0; i < qopt->num_tc; i++)
+-		netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
++	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
++	for (i = 0; i < mqprio->qopt.num_tc; i++)
++		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
++				    mqprio->qopt.offset[i]);
+ 
+ 	return 0;
+ }
+-- 
+cgit 1.2.3-1.el7
+
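
For reference, the shape of the two structures involved, paraphrased
from 5.10-era include/uapi/linux/pkt_sched.h and include/net/pkt_cls.h
(a reader's aid, not the authoritative definitions):

    struct tc_mqprio_qopt {
            __u8    num_tc;                           /* number of TCs */
            __u8    prio_tc_map[TC_QOPT_BITMASK + 1]; /* skb prio -> TC */
            __u8    hw;                               /* offload mode */
            __u16   count[TC_QOPT_MAX_QUEUE];         /* queues per TC */
            __u16   offset[TC_QOPT_MAX_QUEUE];        /* first queue of TC */
    };

    struct tc_mqprio_qopt_offload {
            struct tc_mqprio_qopt qopt; /* must stay the first member */
            u16 mode;
            u16 shaper;                 /* e.g. TC_MQPRIO_SHAPER_BW_RATE */
            u32 flags;
            u64 min_rate[TC_QOPT_MAX_QUEUE]; /* bytes per second */
            u64 max_rate[TC_QOPT_MAX_QUEUE]; /* bytes per second */
    };

Because qopt stays the first member, the conversion in this patch is
mechanical: every qopt->field access becomes mqprio->qopt.field.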
diff --git a/target/linux/mvebu/patches-5.10/703-net-next-ethernet-marvell-mvnetaMQPrioFlag.patch b/target/linux/mvebu/patches-5.10/703-net-next-ethernet-marvell-mvnetaMQPrioFlag.patch
new file mode 100644
index 0000000000..a9bf9b22c3
--- /dev/null
+++ b/target/linux/mvebu/patches-5.10/703-net-next-ethernet-marvell-mvnetaMQPrioFlag.patch
@@ -0,0 +1,35 @@
+From e7ca75fe6662f78bfeb0112671c812e4c7b8e214 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Date: Fri, 26 Nov 2021 12:20:54 +0100
+Subject: net: mvneta: Don't force-set the offloading flag
+
+The qopt->hw flag is set by the TC code according to the offloading mode
+requested by the user. Don't force-set it in the driver; instead, read it
+to make sure we do what was asked.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+(limited to 'drivers/net/ethernet/marvell/mvneta.c')
+
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 46b7604805f76..d3ce87e69d2a8 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4915,7 +4915,9 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 	u8 num_tc;
+ 	int i;
+ 
+-	mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
++	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
++		return 0;
++
+ 	num_tc = mqprio->qopt.num_tc;
+ 
+ 	if (num_tc > rxq_number)
+-- 
+cgit 1.2.3-1.el7
+
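
The hw values the driver now honours come from the UAPI, paraphrased
from 5.10-era include/uapi/linux/pkt_sched.h:

    enum {
            TC_MQPRIO_HW_OFFLOAD_NONE,  /* no offload requested */
            TC_MQPRIO_HW_OFFLOAD_TCS,   /* offload TCs, no queue counts */
            __TC_MQPRIO_HW_OFFLOAD_MAX
    };

With iproute2, "hw 1" requests TC_MQPRIO_HW_OFFLOAD_TCS; after this
patch, any other value makes the driver return early instead of
silently overriding the user's choice.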
diff --git a/target/linux/mvebu/patches-5.10/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch b/target/linux/mvebu/patches-5.10/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch
new file mode 100644
index 0000000000..ac5fdc329a
--- /dev/null
+++ b/target/linux/mvebu/patches-5.10/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch
@@ -0,0 +1,102 @@
+From e9f7099d0730341b24c057acbf545dd019581db6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Date: Fri, 26 Nov 2021 12:20:55 +0100
+Subject: net: mvneta: Allow having more than one queue per TC
+
+The current mqprio implementation assumes that only one queue is used
+per TC. Use the offset and count parameters to allow multiple queues
+per TC. In that case, the controller will use a standard round-robin
+algorithm to pick between the queues assigned to the same TC, which all
+have the same priority.
+
+This only applies to VLAN priorities in ingress traffic, each TC
+corresponding to a VLAN priority.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 35 ++++++++++++++++++++---------------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+(limited to 'drivers/net/ethernet/marvell/mvneta.c')
+
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index d3ce87e69d2a8..aba452e8abfe6 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -493,7 +493,6 @@ struct mvneta_port {
+ 	u8 mcast_count[256];
+ 	u16 tx_ring_size;
+ 	u16 rx_ring_size;
+-	u8 prio_tc_map[8];
+ 
+ 	phy_interface_t phy_interface;
+ 	struct device_node *dn;
+@@ -4897,13 +4896,12 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
+ 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
+ }
+ 
+-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
++static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
+ {
+-	u32 val = 0;
+-	int i;
++	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
+ 
+-	for (i = 0; i < rxq_number; i++)
+-		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
++	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
++	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
+ 
+ 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+ }
+@@ -4912,8 +4910,8 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 			       struct tc_mqprio_qopt_offload *mqprio)
+ {
+ 	struct mvneta_port *pp = netdev_priv(dev);
++	int rxq, tc;
+ 	u8 num_tc;
+-	int i;
+ 
+ 	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ 		return 0;
+@@ -4923,21 +4921,28 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 	if (num_tc > rxq_number)
+ 		return -EINVAL;
+ 
++	mvneta_clear_rx_prio_map(pp);
++
+ 	if (!num_tc) {
+-		mvneta_clear_rx_prio_map(pp);
+ 		netdev_reset_tc(dev);
+ 		return 0;
+ 	}
+ 
+-	memcpy(pp->prio_tc_map, mqprio->qopt.prio_tc_map,
+-	       sizeof(pp->prio_tc_map));
++	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
++
++	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
++		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
++				    mqprio->qopt.offset[tc]);
+ 
+-	mvneta_setup_rx_prio_map(pp);
++		for (rxq = mqprio->qopt.offset[tc];
++		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
++		     rxq++) {
++			if (rxq >= rxq_number)
++				return -EINVAL;
+ 
+-	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+-	for (i = 0; i < mqprio->qopt.num_tc; i++)
+-		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
+-				    mqprio->qopt.offset[i]);
++			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
++		}
++	}
+ 
+ 	return 0;
+ }
+-- 
+cgit 1.2.3-1.el7
+
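
As a worked example of the new mapping: with "queues 2@0 2@2" (count =
{2, 2}, offset = {0, 2}), TC 0 covers rxq 0-1 and TC 1 covers rxq 2-3,
so the loop calls mvneta_map_vlan_prio_to_rxq() with the (tc, rxq)
pairs (0,0), (0,1), (1,2), (1,3); any rxq index reaching rxq_number
fails the whole request with -EINVAL. The register encoding behind the
helper is presumably one 3-bit RXQ field per VLAN priority, which is
what the 0x7 mask above suggests (the macro itself comes from an
earlier mvneta patch; this is an assumption, not a quote):

    /* Presumed layout of MVNETA_VLAN_PRIO_TO_RXQ: 3 bits per priority */
    #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq)     ((rxq) << ((prio) * 3))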
diff --git a/target/linux/mvebu/patches-5.10/705-net-next-ethernet-marvell-mvnetaMQPrioTCOffload.patch b/target/linux/mvebu/patches-5.10/705-net-next-ethernet-marvell-mvnetaMQPrioTCOffload.patch
new file mode 100644
index 0000000000..d2fd1832d8
--- /dev/null
+++ b/target/linux/mvebu/patches-5.10/705-net-next-ethernet-marvell-mvnetaMQPrioTCOffload.patch
@@ -0,0 +1,187 @@
+From 2551dc9e398c37a15e52122d385c29a8b06be45f Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Date: Fri, 26 Nov 2021 12:20:56 +0100
+Subject: net: mvneta: Add TC traffic shaping offload
+
+The mvneta controller is able to do token-bucket per-queue traffic
+shaping. This commit adds support for configuring it through the TC
+mqprio interface.
+
+The token-bucket parameters are customisable, but the current
+implementation configures them for a 10kbps resolution on the rate
+limitation, since that allows covering the whole range of max_rate
+values from 10kbps to 5Gbps in 10kbps increments.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier at bootlin.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 120 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 119 insertions(+), 1 deletion(-)
+
+(limited to 'drivers/net/ethernet/marvell/mvneta.c')
+
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index aba452e8abfe6..2368ae3f0e10d 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -248,12 +248,39 @@
+ #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
+ #define MVNETA_PORT_TX_RESET                     0x3cf0
+ #define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
++#define MVNETA_TXQ_CMD1_REG			 0x3e00
++#define      MVNETA_TXQ_CMD1_BW_LIM_SEL_V1	 BIT(3)
++#define      MVNETA_TXQ_CMD1_BW_LIM_EN		 BIT(0)
++#define MVNETA_REFILL_NUM_CLK_REG		 0x3e08
++#define      MVNETA_REFILL_MAX_NUM_CLK		 0x0000ffff
+ #define MVNETA_TX_MTU                            0x3e0c
+ #define MVNETA_TX_TOKEN_SIZE                     0x3e14
+ #define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
++#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		 (0x3e20 + ((q) << 2))
++#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
++#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
++#define      MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	 0x0007ffff
+ #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
+ #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
+ 
++/* The values of the bucket refill base period and refill period are taken from
++ * the reference manual, and add up to a base resolution of 10Kbps. This allows
++ * covering all rate-limit values from 10Kbps up to 5Gbps.
++ */
++
++/* Base period for the rate limit algorithm */
++#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100
++
++/* Number of base periods to wait between each bucket refill */
++#define MVNETA_TXQ_BUCKET_REFILL_PERIOD	1000
++
++/* The base resolution for rate limiting, in bps. Any max_rate value should be
++ * a multiple of that value.
++ */
++#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
++					 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
++					  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
++
+ #define MVNETA_LPI_CTRL_0                        0x2cc0
+ #define MVNETA_LPI_CTRL_1                        0x2cc4
+ #define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
+@@ -4906,11 +4933,74 @@ static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
+ 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+ }
+ 
++static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
++{
++	unsigned long core_clk_rate;
++	u32 refill_cycles;
++	u32 val;
++
++	core_clk_rate = clk_get_rate(pp->clk);
++	if (!core_clk_rate)
++		return -EINVAL;
++
++	refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
++			(NSEC_PER_SEC / core_clk_rate);
++
++	if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
++		return -EINVAL;
++
++	/* Enable bw limit algorithm version 3 */
++	val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
++	val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
++	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
++
++	/* Set the base refill rate */
++	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
++
++	return 0;
++}
++
++static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
++{
++	u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
++
++	val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
++	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
++}
++
++static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
++				    u64 min_rate, u64 max_rate)
++{
++	u32 refill_val, rem;
++	u32 val = 0;
++
++	/* Convert from Bps to bps */
++	max_rate *= 8;
++
++	if (min_rate)
++		return -EINVAL;
++
++	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
++				 &rem);
++
++	if (rem || !refill_val ||
++	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
++		return -EINVAL;
++
++	val = refill_val;
++	val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
++		MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
++
++	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
++
++	return 0;
++}
++
+ static int mvneta_setup_mqprio(struct net_device *dev,
+ 			       struct tc_mqprio_qopt_offload *mqprio)
+ {
+ 	struct mvneta_port *pp = netdev_priv(dev);
+-	int rxq, tc;
++	int rxq, txq, tc, ret;
+ 	u8 num_tc;
+ 
+ 	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+@@ -4924,6 +5014,7 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 	mvneta_clear_rx_prio_map(pp);
+ 
+ 	if (!num_tc) {
++		mvneta_disable_per_queue_rate_limit(pp);
+ 		netdev_reset_tc(dev);
+ 		return 0;
+ 	}
+@@ -4944,6 +5035,33 @@ static int mvneta_setup_mqprio(struct net_device *dev,
+ 		}
+ 	}
+ 
++	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
++		mvneta_disable_per_queue_rate_limit(pp);
++		return 0;
++	}
++
++	if (mqprio->qopt.num_tc > txq_number)
++		return -EINVAL;
++
++	ret = mvneta_enable_per_queue_rate_limit(pp);
++	if (ret)
++		return ret;
++
++	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
++		for (txq = mqprio->qopt.offset[tc];
++		     txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
++		     txq++) {
++			if (txq >= txq_number)
++				return -EINVAL;
++
++			ret = mvneta_setup_queue_rates(pp, txq,
++						       mqprio->min_rate[tc],
++						       mqprio->max_rate[tc]);
++			if (ret)
++				return ret;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+-- 
+cgit 1.2.3-1.el7
+
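
The refill arithmetic is easiest to check with numbers. A minimal,
self-contained sketch of the calculation (plain C mirroring the
driver's formulas; the 100 ns base period and the refill period of
1000 come straight from the defines above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Resolution = 1e9 / (100 * 1000) = 10000 bps, i.e. 10 kbps */
            const uint64_t resolution = 1000000000ULL / (100 * 1000);

            /* mqprio passes max_rate in bytes/s: 100 Mbit/s = 12500000 Bps,
             * which the driver multiplies by 8 before dividing.
             */
            uint64_t max_rate_bps = 12500000ULL * 8;

            /* Prints "refill value = 10000" */
            printf("refill value = %llu\n",
                   (unsigned long long)(max_rate_bps / resolution));
            return 0;
    }

The maximum refill value of 0x7ffff (524287) times the 10kbps
resolution is what caps the shaper at roughly 5.2Gbps, and any max_rate
that is not a 10kbps multiple is rejected with -EINVAL. From userspace
this corresponds to something like "tc qdisc replace dev eth0 root
mqprio num_tc 2 map 0 0 0 0 1 1 1 1 queues 1@0 1@1 hw 1 shaper bw_rlim
max_rate 100Mbit 500Mbit" (min_rate must stay unset, as the driver
rejects it).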



More information about the lede-commits mailing list