[PATCH net-next v2 7/8] net: stmmac: xgmac: Complete FPE support
Furong Xu
0x1207@gmail.com
Thu Oct 17 23:39:13 PDT 2024
Implement the necessary stmmac_fpe_ops function callbacks for xgmac.
Signed-off-by: Furong Xu <0x1207@gmail.com>
---
.../net/ethernet/stmicro/stmmac/stmmac_fpe.c | 77 +++++++++++++++++++
1 file changed, 77 insertions(+)
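
Note (not part of the patch): a minimal, self-contained userspace sketch of
how a per-TC preemptible bitmask (pclass) expands into the per-TX-queue
bitmap that ends up in FPE_MTL_PREEMPTION_CLASS, mirroring the loop in
dwxgmac3_fpe_map_preemption_class() below. The tc_to_txq layout, queue
counts and pclass value are made-up example values; BIT()/GENMASK() are
redefined locally so the sketch builds outside the kernel.

/* Illustrative only: expand a preemptible-TC mask into a TX-queue mask. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ~((1U << (l)) - 1))

struct tc_txq { uint32_t offset, count; };

int main(void)
{
	/* Hypothetical mqprio layout: TC0 -> queues 0-1, TC1 -> queues 2-3 */
	struct tc_txq tc_to_txq[] = { { 0, 2 }, { 2, 2 } };
	uint32_t pclass = BIT(1);	/* mark TC1 as preemptible */
	uint32_t preemptible_txqs = 0;

	for (uint32_t tc = 0; tc < 2; tc++) {
		uint32_t offset = tc_to_txq[tc].offset;
		uint32_t count = tc_to_txq[tc].count;

		/* All queues of a preemptible TC become preemptible queues */
		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);
	}

	printf("preemptible_txqs = 0x%x\n", preemptible_txqs); /* prints 0xc */
	return 0;
}

In the driver itself, the pclass bitmask is expected to arrive from the
core's frame-preemption handling of the mqprio/taprio offload; the sketch
only illustrates the bit arithmetic performed per traffic class.
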
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
index dfe911b3f486..c90ed7c1279d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -373,6 +373,78 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
&dwxgmac3_fpe_info);
}
+static int dwxgmac3_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+{
+ return common_fpe_irq_status(ioaddr + XGMAC_MAC_FPE_CTRL_STS, dev);
+}
+
+static void dwxgmac3_fpe_send_mpacket(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type)
+{
+ common_fpe_send_mpacket(ioaddr + XGMAC_MAC_FPE_CTRL_STS, cfg, type);
+}
+
+static int dwxgmac3_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+ return FIELD_GET(FPE_MTL_ADD_FRAG_SZ,
+ readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS));
+}
+
+static void dwxgmac3_fpe_set_add_frag_size(void __iomem *ioaddr,
+ u32 add_frag_size)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
+ ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+}
+
+static int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass)
+{
+ u32 val, offset, count, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 num_tc = ndev->num_tc;
+
+ if (!num_tc) {
+ /* Restore default TC:Queue mapping */
+ for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ }
+ }
+
+ /* Synopsys Databook:
+ * "All Queues within a traffic class are selected in a round robin
+ * fashion (when packets are available) when the traffic class is
+ * selected by the scheduler for packet transmission. This is true for
+ * any of the scheduling algorithms."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ for (u32 i = 0; i < count; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ }
+ }
+
+ val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
+ priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+
+ return 0;
+}
+
const struct stmmac_fpe_ops dwmac5_fpe_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
@@ -384,4 +456,9 @@ const struct stmmac_fpe_ops dwmac5_fpe_ops = {
const struct stmmac_fpe_ops dwxgmac_fpe_ops = {
.fpe_configure = dwxgmac3_fpe_configure,
+ .fpe_send_mpacket = dwxgmac3_fpe_send_mpacket,
+ .fpe_irq_status = dwxgmac3_fpe_irq_status,
+ .fpe_get_add_frag_size = dwxgmac3_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwxgmac3_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
--
2.34.1