[openwrt/openwrt] kernel: add Intel/Lantiq VRX518 TC driver

LEDE Commits lede-commits at lists.infradead.org
Mon Jan 16 15:42:33 PST 2023


dangole pushed a commit to openwrt/openwrt.git, branch master:
https://git.openwrt.org/474bbe23b7ec72ba84b9b7c91e17a69f58bd88b2

commit 474bbe23b7ec72ba84b9b7c91e17a69f58bd88b2
Author: Martin Schiller <ms.3headeddevs at gmail.com>
AuthorDate: Wed Aug 21 08:36:05 2019 +0200

    kernel: add Intel/Lantiq VRX518 TC driver
    
    This driver version is also included in Intel UGW 8.5.2.10.
    
    Signed-off-by: Martin Schiller <ms.3headeddevs at gmail.com>
    [updated for kernel 5.10]
    Signed-off-by: Jan Hoffmann <jan at 3e8.eu>
    [update to 1.5.12.4, switch to tag tarball]
    Signed-off-by: Andre Heider <a.heider at gmail.com>
    [add working software data path]
    Signed-off-by: Jan Hoffmann <jan at 3e8.eu>
    Signed-off-by: Andre Heider <a.heider at gmail.com>
---
 package/kernel/lantiq/vrx518_tc/Makefile           |   74 ++
 .../lantiq/vrx518_tc/patches/100-compat.patch      |  859 +++++++++++++
 .../lantiq/vrx518_tc/patches/200-swplat.patch      | 1356 ++++++++++++++++++++
 .../lantiq/vrx518_tc/patches/201-desc-length.patch |  342 +++++
 .../kernel/lantiq/vrx518_tc/patches/202-napi.patch |  423 ++++++
 .../kernel/lantiq/vrx518_tc/patches/203-dbg.patch  |  120 ++
 6 files changed, 3174 insertions(+)

diff --git a/package/kernel/lantiq/vrx518_tc/Makefile b/package/kernel/lantiq/vrx518_tc/Makefile
new file mode 100644
index 0000000000..fde4c07e44
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/Makefile
@@ -0,0 +1,74 @@
+#
+# Copyright (C) 2019 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+PKG_NAME:=vrx518_tc
+PKG_VERSION:=1.5.12.4
+PKG_RELEASE:=$(AUTORELEASE)
+PKG_BASE_NAME:=vrx518_tc_drv
+
+UGW_VERSION=8.5.2.10
+UGW_BASENAME=$(PKG_BASE_NAME)-ugw_$(UGW_VERSION)
+
+PKG_SOURCE:=$(UGW_BASENAME).tar.bz2
+PKG_SOURCE_URL:=https://gitlab.com/prpl-foundation/intel/$(PKG_BASE_NAME)/-/archive/ugw_$(UGW_VERSION)/
+PKG_HASH:=0c5bb0f9a06dc4cc4bbb8b930d01a673daba1b66615e8328818356d32c8f1548
+PKG_BUILD_DIR:=$(KERNEL_BUILD_DIR)/$(UGW_BASENAME)
+PKG_LICENSE:=GPL-2.0
+PKG_LICENSE_FILES:=LICENSE
+
+include $(INCLUDE_DIR)/package.mk
+
+PLAT_DIR:=dcdp
+PKG_EXTMOD_SUBDIRS:=$(PLAT_DIR)
+
+# TODO this driver depends on the vrx518 ppe firmware, add this dependency if
+# that ever gets a compatible license
+define KernelPackage/$(PKG_NAME)
+  SECTION:=sys
+  CATEGORY:=Kernel modules
+  SUBMENU:=Network Devices
+  TITLE:=VRX518 TC driver
+  KCONFIG:= \
+    CONFIG_ATM_LANE=m \
+    CONFIG_ATM_MPOA=m \
+    CONFIG_ATM_MPOA_INTEL_DSL_PHY_SUPPORT=y
+  DEPENDS:=@TARGET_ipq40xx +kmod-vrx518_ep +kmod-crypto-md5 +kmod-atm +kmod-ipoa +br2684ctl
+  AUTOLOAD:=$(call AutoLoad,27,vrx518_tc)
+  FILES:=$(PKG_BUILD_DIR)/$(PLAT_DIR)/$(PKG_NAME).ko
+endef
+
+define KernelPackage/$(PKG_NAME)/description
+  VRX518 TC Driver
+endef
+
+define Build/Prepare
+	$(PKG_UNPACK)
+	# eliminate all carriage returns / convert to unix encoding
+	(cd $(PKG_BUILD_DIR) && find . -type f -exec sed -i 's/\r//g' {} +)
+	$(Build/Patch)
+endef
+
+define Build/InstallDev
+	$(INSTALL_DIR) $(1)/usr/include/net/
+	$(CP) $(PKG_BUILD_DIR)/$(PLAT_DIR)/inc/dsl_tc.h $(1)/usr/include/net/
+endef
+
+EXTRA_CFLAGS:= \
+	-I$(STAGING_DIR)/usr/include
+
+define Build/Compile
+	$(KERNEL_MAKE) \
+		M="$(PKG_BUILD_DIR)/$(PLAT_DIR)" \
+		EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+		FEATURE_VRX518_CPU=y \
+		modules
+endef
+
+$(eval $(call KernelPackage,$(PKG_NAME)))
diff --git a/package/kernel/lantiq/vrx518_tc/patches/100-compat.patch b/package/kernel/lantiq/vrx518_tc/patches/100-compat.patch
new file mode 100644
index 0000000000..2c92912ade
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/patches/100-compat.patch
@@ -0,0 +1,859 @@
+--- a/dcdp/atm_tc.c
++++ b/dcdp/atm_tc.c
+@@ -44,9 +44,9 @@
+ #include <linux/atmioc.h>
+ #include <linux/skbuff.h>
+ #include "inc/dsl_tc.h"
+-#include <net/datapath_proc_api.h>
++// #include <net/datapath_proc_api.h>
+ #include <linux/atm.h>
+-#include <net/datapath_api.h>
++// #include <net/datapath_api.h>
+ #include <net/dc_ep.h>
+ #include<linux/sched.h>
+ #include<linux/kthread.h>
+@@ -730,20 +730,16 @@ static void atm_aca_init(struct atm_priv
+ 		ACA_TXOUT_EN | ACA_RXIN_EN | ACA_RXOUT_EN, 1);
+ }
+ 
+-static int print_datetime(char *buffer, const struct timespec *datetime)
++static int print_datetime(char *buffer, const struct timespec64 *datetime)
+ {
+-	struct timeval tv;
+ 	struct tm nowtm;
+ 	char tmbuf[64];
+-	s64 nsec;
+ 
+ 	if (buffer == NULL || datetime == NULL) {
+ 		pr_err("%s : Invalid arguments\n", __func__);
+ 		return -1;
+ 	}
+-	nsec = timespec_to_ns(datetime);
+-	tv = ns_to_timeval(nsec);
+-	time_to_tm(tv.tv_sec, 0, &nowtm);
++	time64_to_tm(datetime->tv_sec, 0, &nowtm);
+ 	memset(tmbuf, 0, 64);
+ 
+ 	snprintf(tmbuf, sizeof(tmbuf), "%ld-%d-%d %d:%d:%d",
+@@ -753,7 +749,7 @@ static int print_datetime(char *buffer,
+ 			nowtm.tm_hour,
+ 			nowtm.tm_min,
+ 			nowtm.tm_sec);
+-	snprintf(buffer, sizeof(buffer), "%s.%06d", tmbuf, (int)tv.tv_usec);
++	snprintf(buffer, sizeof(buffer), "%s.%06d", tmbuf, (int)datetime->tv_nsec / 1000);
+ 
+ 	return 0;
+ }
+@@ -1313,7 +1309,7 @@ static int ppe_send(struct atm_vcc *vcc,
+ 	/* assume LLC header + Ethernet ID: 6+2 */
+     if ((priv->conn[conn].mpoa_type == MPOA_TYPE_EOA_WO_FCS) ||
+ 		(priv->conn[conn].mpoa_type == MPOA_TYPE_EOA_W_FCS)) {
+-        if (__skb_put_padto(skb, ETH_ZLEN + 8))
++        if (__skb_put_padto(skb, ETH_ZLEN + 8, false))
+ 		    goto CHECK_SHOWTIME_FAIL;
+ 	}
+ 
+@@ -1418,7 +1414,7 @@ int ppe_send_oam(struct atm_vcc *vcc, vo
+ 	struct atm_priv *priv = g_atm_tc;
+ 	struct sk_buff *skb;
+ 	unsigned int conn;
+-	dp_subif_t dp_id;
++// 	dp_subif_t dp_id;
+ #ifdef OAM_FIX_GRX750
+ 	unsigned char *dest_cell;
+ #endif
+@@ -1465,8 +1461,8 @@ int ppe_send_oam(struct atm_vcc *vcc, vo
+ 		priv->tc_priv->param.oam_prio = 0;
+ 	qid = priv->conn[conn].prio_queue_map[priv->tc_priv->param.oam_prio];
+ 	vid = priv->conn[conn].subif_id;
+-	dp_id.subif = (vid & (~0x7f)) |
+-		ATM_DESC_SUBIF_ID(qid, mpoa_pt, mpoa_type);
++// 	dp_id.subif = (vid & (~0x7f)) |
++// 		ATM_DESC_SUBIF_ID(qid, mpoa_pt, mpoa_type);
+ #ifdef OAM_FIX_GRX750
+ 	dest_cell = kmalloc(CELL_SIZE, GFP_KERNEL);
+ 	if (dest_cell == NULL) {
+@@ -1494,18 +1490,18 @@ int ppe_send_oam(struct atm_vcc *vcc, vo
+ #else
+ 	memcpy(skb->data, cell, CELL_SIZE);
+ #endif
+-	/* SET SUBIFID */
+-	skb->DW0 = (skb->DW0 & ~0x7FFF) | dp_id.subif;
+-	skb->dev = priv->conn[conn].dev;
+-	tc_dbg(priv->tc_priv, MSG_TX, "conn: %d, dev name: %s, qid: 0x%x len:%d\n",
+-		conn, skb->dev->name, dp_id.subif, skb->len);
+-	#ifdef OAM_FIX_GRX750
+-	if (priv->tc_priv->tc_ops.send(NULL,
+-			skb, dp_id.subif, ATM_OAM_PKT) == 0) {
+-	#else
++// 	/* SET SUBIFID */
++// 	skb->DW0 = (skb->DW0 & ~0x7FFF) | dp_id.subif;
++// 	skb->dev = priv->conn[conn].dev;
++// 	tc_dbg(priv->tc_priv, MSG_TX, "conn: %d, dev name: %s, qid: 0x%x len:%d\n",
++// 		conn, skb->dev->name, dp_id.subif, skb->len);
++// 	#ifdef OAM_FIX_GRX750
++// 	if (priv->tc_priv->tc_ops.send(NULL,
++// 			skb, dp_id.subif, ATM_OAM_PKT) == 0) {
++// 	#else
+ 	if (priv->tc_priv->tc_ops.send(NULL,
+ 			skb, qid, ATM_OAM_PKT) == 0) {
+-	#endif
++// 	#endif
+ 		priv->stats.oam_tx_pkts++;
+ 		priv->stats.oam_tx_bytes += skb->len;
+ 		priv->conn[conn].stats.oam_tx_pkts++;
+@@ -1604,7 +1600,7 @@ static void oam_push(struct atm_priv *pr
+ 		conn = -1; /* invalid */
+ 	if (conn_valid(conn) && priv->conn[conn].vcc != NULL) {
+ 		vcc = priv->conn[conn].vcc;
+-		priv->conn[conn].access_time = current_kernel_time();
++		ktime_get_coarse_ts64(&priv->conn[conn].access_time);
+ 
+ 		tc_dbg(priv->tc_priv, MSG_OAM_RX, "conn=%d, vpi: %d, vci:%d\n",
+ 			conn, header->vpi, header->vci);
+@@ -2547,30 +2543,29 @@ static void ppe_atm_fw_hw_init(struct at
+ static int atm_dev_init(struct atm_priv *atm_priv, int ep_id)
+ {
+ 	int i, err;
+-	struct atm_dev *dev;
+-	dev = atm_dev_register(g_atm_dev_name,
+-		atm_priv->tc_priv->ep_dev[ep_id].dev,
+-		&g_ppe_atm_ops, -1, NULL);
+-	if (!dev) {
+-		err = -EIO;
+-		goto ATM_DEV_REGISTER_FAIL;
+-	}
+-	dev->ci_range.vpi_bits = 8;
+-	dev->ci_range.vci_bits = 16;
+-	/* assume 3200 cell rate
+-	 * before get real information
+-	 */
+-	dev->link_rate =
+-		DEFAULT_CELL_RATE;
+-	dev->dev_data = atm_priv;
+-	dev->phy_data =
+-		(void *)(unsigned long)0;
+ 
+ 	for (i = 0; i < ATM_PORT_NUMBER; i++) {
+ 		if (atm_priv->port[i].dev)
+ 			continue;
+ 		atm_priv->port[i].tx_max_cell_rate = DEFAULT_CELL_RATE;
+-		atm_priv->port[i].dev = dev;
++		atm_priv->port[i].dev = atm_dev_register(g_atm_dev_name,
++				atm_priv->tc_priv->ep_dev[ep_id].dev,
++				&g_ppe_atm_ops, -1, NULL);
++		if (!atm_priv->port[i].dev) {
++			err = -EIO;
++			goto ATM_DEV_REGISTER_FAIL;
++		} else {
++			atm_priv->port[i].dev->ci_range.vpi_bits = 8;
++			atm_priv->port[i].dev->ci_range.vci_bits = 16;
++			/* assume 3200 cell rate
++			 * before get real information
++			 */
++			atm_priv->port[i].dev->link_rate =
++				DEFAULT_CELL_RATE;
++			atm_priv->port[i].dev->dev_data = atm_priv;
++			atm_priv->port[i].dev->phy_data =
++				(void *)(unsigned long)i;
++		}
+ 	}
+ 	//TODO : check for SoC PMAC, current fix
+ #ifdef CONFIG_SOC_TYPE_XWAY
+@@ -2985,7 +2980,8 @@ static unsigned int atm_get_pvc_id(struc
+ 		return -EINVAL;
+ 	}
+ 
+-	return (skb->DW0 >> 3) & 0xF;
++// 	return (skb->DW0 >> 3) & 0xF;
++	return 1;
+ }
+ 
+ static int atm_get_qid_by_vcc(struct net_device *dev, struct sk_buff *skb,
+@@ -3292,7 +3288,7 @@ static void atm_push(struct net_device *
+ 					+= skb->len;
+ 			} else
+ 				priv->stats.aal5_rx_errors++;
+-			priv->conn[conn].access_time = current_kernel_time();
++			ktime_get_coarse_ts64(&priv->conn[conn].access_time);
+ 			spin_unlock_bh(&priv->atm_lock);
+ 
+ 			vcc->push(vcc, skb);
+--- a/dcdp/inc/atm_tc.h
++++ b/dcdp/inc/atm_tc.h
+@@ -449,7 +449,7 @@ struct atm_port {
+ struct atm_pvc {
+ 	struct atm_vcc *vcc; /* opened VCC */
+ 	struct net_device *dev; /* net device associated with atm VCC */
+-	struct timespec access_time; /* time when last user cell arrived */
++	struct timespec64 access_time; /* time when last user cell arrived */
+ 	int prio_queue_map[ATM_PRIO_Q_NUM];
+ 	unsigned int prio_tx_packets[ATM_PRIO_Q_NUM];
+ 	struct atm_stats stats;
+--- a/dcdp/inc/tc_api.h
++++ b/dcdp/inc/tc_api.h
+@@ -196,19 +196,6 @@ static inline void aca_ring_addr_init(st
+ 	ring->aca_cnt_phyaddr = ep_dev->phy_membase + addr;
+ }
+ 
+-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len)
+-{
+-	unsigned int size = skb->len;
+-
+-	if (unlikely(size < len)) {
+-		len -= size;
+-		if (skb_pad(skb, len))
+-			return -ENOMEM;
+-		__skb_put(skb, len);
+-	}
+-	return 0;
+-}
+-
+ extern int showtime_stat(struct tc_priv *);
+ extern void dump_skb_info(struct tc_priv *, struct sk_buff *, u32);
+ extern void *tc_buf_alloc(void *, size_t, u32 *,
+--- a/dcdp/inc/tc_proc.h
++++ b/dcdp/inc/tc_proc.h
+@@ -23,6 +23,8 @@
+ #ifndef __TC_PROC_H__
+ #define __TC_PROC_H__
+ 
++#include <linux/version.h>
++
+ #define TC_PROC_DIR "driver/vrx518"
+ #define TC_PROC_ATM_DIR "atm"
+ #define TC_PROC_PTM_DIR "ptm"
+@@ -41,7 +43,7 @@ enum {
+ struct tc_proc_list {
+ 	char proc_name[32];
+ 	umode_t mode;
+-	const struct file_operations *fops;
++	const struct proc_ops *fops;
+ 	int is_folder;
+ };
+ 
+--- a/dcdp/ptm_tc.c
++++ b/dcdp/ptm_tc.c
+@@ -39,7 +39,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/printk.h>
+ #include <linux/etherdevice.h>
+-#include <net/datapath_proc_api.h>
++// #include <net/datapath_proc_api.h>
+ 
+ #include "inc/tc_main.h"
+ #include "inc/reg_addr.h"
+@@ -182,8 +182,8 @@ static int ptm_get_qid(struct net_device
+ 	return qid;
+ }
+ 
+-static struct rtnl_link_stats64 *ptm_get_stats(struct net_device *dev,
+-					struct rtnl_link_stats64 *storage)
++static void ptm_get_stats(struct net_device *dev,
++			  struct rtnl_link_stats64 *storage)
+ {
+ 	struct ptm_priv *ptm_tc = netdev_priv(dev);
+ 
+@@ -191,8 +191,6 @@ static struct rtnl_link_stats64 *ptm_get
+ 		memcpy(storage, &ptm_tc->stats64, sizeof(ptm_tc->stats64));
+ 	else
+ 		storage->tx_errors += ptm_tc->stats64.tx_errors;
+-
+-	return storage;
+ }
+ 
+ static int ptm_set_mac_address(struct net_device *dev, void *p)
+@@ -209,7 +207,7 @@ static int ptm_set_mac_address(struct ne
+ 	return 0;
+ }
+ 
+-static void ptm_tx_timeout(struct net_device *dev)
++static void ptm_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct ptm_priv *ptm_tc = netdev_priv(dev);
+ 
+@@ -503,7 +501,7 @@ static int ptm_xmit(struct sk_buff *skb,
+ 	if (!showtime_stat(ptm_tc->tc_priv))
+ 		goto PTM_XMIT_DROP;
+ 
+-	if (__skb_put_padto(skb, ETH_ZLEN))
++	if (__skb_put_padto(skb, ETH_ZLEN, false))
+ 		goto PTM_XMIT_DROP;
+ 
+ 	dump_skb_info(ptm_tc->tc_priv, skb, (MSG_TX | MSG_TXDATA));
+@@ -632,11 +630,8 @@ static int ptm_dev_init(struct tc_priv *
+ 	struct ptm_priv *ptm_tc;
+ 	const char macaddr[ETH_ALEN]
+ 		= {0xAC, 0x9A, 0x96, 0x11, 0x22, 0x33};
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+-	dev = alloc_netdev_mq(sizeof(*ptm_tc), "ptm%d", ptm_setup, 4);
+-#else
+-	dev = alloc_netdev_mq(sizeof(*ptm_tc), "ptm%d",  NET_NAME_ENUM, ptm_setup, 4);
+-#endif
++
++	dev = alloc_netdev_mq(sizeof(*ptm_tc), "dsl%d",  NET_NAME_ENUM, ptm_setup, 4);
+ 	if (!dev) {
+ 		tc_dbg(tc_priv, MSG_INIT, "Cannot alloc net device\n");
+ 		return -ENOMEM;
+@@ -2103,7 +2098,6 @@ static int ptm_showtime_exit(const unsig
+ 	struct ptm_ep_priv *priv = tc_ep_priv(idx);
+ 	u32 stop = ACA_TXIN_EN;
+ 	struct dc_ep_dev *ep;
+-	int i = 0;
+ 
+ 	tc_info(priv->tc_priv, MSG_EVENT, "Line[%d]: show time exit!\n", idx);
+ 	ep = priv->ep;
+--- a/dcdp/tc_api.c
++++ b/dcdp/tc_api.c
+@@ -52,18 +52,24 @@ static const char ppe_fw_name[] = "ppe_f
+ #define VRX518_PPE_FW_ID		0xB
+ #define MD5_LEN				16
+ 
++enum tc_multicast_groups {
++	TC_MCGRP,
++};
++
++/* TC message multicast group */
++static const struct genl_multicast_group tc_ml_grps[] = {
++	[TC_MCGRP] = { .name = TC_MCAST_GRP_NAME, },
++};
++
+ /* TC message genelink family */
+ static struct genl_family tc_gnl_family = {
+-	.id = GENL_ID_GENERATE,	/* To generate an id for the family*/
++// 	.id = GENL_ID_GENERATE,	/* To generate an id for the family*/
+ 	.hdrsize = 0,
+ 	.name = TC_FAMILY_NAME,	/*family name, used by userspace application*/
+ 	.version = 1,		/*version number  */
+ 	.maxattr = TC_A_MAX - 1,
+-};
+-
+-/* TC message multicast group */
+-static struct genl_multicast_group tc_ml_grp = {
+-	.name = TC_MCAST_GRP_NAME,
++	.mcgrps = tc_ml_grps,
++	.n_mcgrps = ARRAY_SIZE(tc_ml_grps),
+ };
+ 
+ /**
+@@ -568,7 +574,8 @@ int tc_ntlk_msg_send(struct tc_priv *pri
+ 	nla_put_u32(skb, TC_A_LINENO, ln_no);
+ 
+ 	genlmsg_end(skb, msg_head);
+-	ret = genlmsg_multicast(skb, pid, tc_ml_grp.id, GFP_KERNEL);
++	ret = genlmsg_multicast(&tc_gnl_family, skb, pid, TC_MCGRP,
++				GFP_KERNEL);
+ 	if (ret) {
+ 		tc_err(priv, MSG_EVENT, "Sent TC multicast message Fail!\n");
+ 		goto err1;
+@@ -590,21 +597,11 @@ int tc_gentlk_init(struct tc_priv *priv)
+ 		return ret;
+ 	}
+ 
+-	ret = genl_register_mc_group(&tc_gnl_family, &tc_ml_grp);
+-	if (ret) {
+-		tc_err(priv, MSG_EVENT, "register mc group fail: %i, grp name: %s\n",
+-			ret, tc_ml_grp.name);
+-		genl_unregister_family(&tc_gnl_family);
+-		return ret;
+-	}
+-
+ 	return 0;
+ }
+ 
+ void tc_gentlk_exit(void)
+ {
+-	/* unregister mc groups */
+-	genl_unregister_mc_group(&tc_gnl_family, &tc_ml_grp);
+ 	/*unregister the family*/
+ 	genl_unregister_family(&tc_gnl_family);
+ }
+@@ -666,7 +663,7 @@ void dump_skb_info(struct tc_priv *tcpri
+ 		(u32)skb->end, skb->len);
+ 	tc_dbg(tcpriv, type,
+ 		"skb: clone: %d, users: %d\n",
+-		skb->cloned, atomic_read(&skb->users));
++		skb->cloned, refcount_read(&skb->users));
+ 	tc_dbg(tcpriv, type,
+ 		"skb: nfrag: %d\n", skb_shinfo(skb)->nr_frags);
+ 
+@@ -936,7 +933,6 @@ static int fw_md5_check(struct tc_priv *
+ 	}
+ 
+ 	desc->tfm = md5;
+-	desc->flags = 0;
+ 
+ 	ret = crypto_shash_init(desc);
+ 	if (ret) {
+--- a/dcdp/tc_proc.c
++++ b/dcdp/tc_proc.c
+@@ -22,7 +22,9 @@
+ *******************************************************************************/
+ #include <linux/fs.h>
+ #include <linux/seq_file.h>
+-#include <net/datapath_api.h>
++// #include <net/datapath_api.h>
++#include <linux/etherdevice.h>
++#include <linux/atmdev.h>
+ #include <net/genetlink.h>
+ #include <linux/time.h>
+ #include "inc/tc_main.h"
+@@ -353,7 +355,7 @@ static ssize_t mem_proc_write(struct fil
+ 	}
+ 	addr = set_val = repeat_cnt = 0;
+ 
+-	if (!access_ok(VERIFY_READ, buf, count))
++	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+ 	len = count < sizeof(str) ? count : sizeof(str) - 1;
+@@ -450,13 +452,12 @@ static int proc_read_mem_seq_open(struct
+ 	return single_open(file, proc_read_mem, NULL);
+ }
+ 
+-static const struct file_operations mem_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_mem_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
+-	.write		= mem_proc_write,
++static const struct proc_ops mem_proc_fops = {
++	.proc_open	= proc_read_mem_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= mem_proc_write,
+ };
+ 
+ static ssize_t pp32_proc_write(struct file *file, const char __user *buf,
+@@ -748,13 +749,12 @@ static int proc_read_pp32_seq_open(struc
+ 	return single_open(file, proc_read_pp32, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations pp32_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_pp32_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
+-	.write		= pp32_proc_write,
++static const struct proc_ops pp32_proc_fops = {
++	.proc_open	= proc_read_pp32_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= pp32_proc_write,
+ };
+ 
+ static int proc_read_tc_cfg(struct seq_file *seq, void *v)
+@@ -865,13 +865,12 @@ static int proc_read_tc_cfg_seq_open(str
+ 	return single_open(file, proc_read_tc_cfg, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations tc_cfg_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_tc_cfg_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
+-	.write		= proc_write_cfg,
++static const struct proc_ops tc_cfg_proc_fops = {
++	.proc_open	= proc_read_tc_cfg_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= proc_write_cfg,
+ };
+ 
+ static ssize_t proc_write_dbg(struct file *file, const char __user *buf,
+@@ -951,13 +950,12 @@ static int proc_read_dbg_seq_open(struct
+ 	return single_open(file, proc_read_dbg, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations tc_dbg_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_dbg_seq_open,
+-	.read		= seq_read,
+-	.write		= proc_write_dbg,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops tc_dbg_proc_fops = {
++	.proc_open	= proc_read_dbg_seq_open,
++	.proc_read	= seq_read,
++	.proc_write	= proc_write_dbg,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static ssize_t proc_write_tc_switch(struct file *file, const char __user *buf,
+@@ -1018,11 +1016,11 @@ proc_tc_switch_help:
+ 	return count;
+ }
+ 
+-static const struct file_operations tc_switch_proc_fops = {
+-	.owner      = THIS_MODULE,
+-	.write      = proc_write_tc_switch,
+-	.llseek     = noop_llseek,
++static const struct proc_ops tc_switch_proc_fops = {
++	.proc_write = proc_write_tc_switch,
++	.proc_lseek = noop_llseek,
+ };
++
+ static ssize_t proc_write_show_time(struct file *file, const char __user *buf,
+ 			size_t count, loff_t *data)
+ {
+@@ -1077,10 +1075,9 @@ proc_show_time_help:
+ 	return count;
+ }
+ 
+-static const struct file_operations tc_show_time_proc_fops = {
+-	.owner      = THIS_MODULE,
+-	.write      = proc_write_show_time,
+-	.llseek     = noop_llseek,
++static const struct proc_ops tc_show_time_proc_fops = {
++	.proc_write = proc_write_show_time,
++	.proc_lseek = noop_llseek,
+ };
+ 
+ static int proc_read_ver(struct seq_file *seq, void *v)
+@@ -1128,12 +1125,11 @@ static int proc_read_ver_seq_open(struct
+ 	return single_open(file, proc_read_ver, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations tc_ver_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_ver_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops tc_ver_proc_fops = {
++	.proc_open	= proc_read_ver_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static int proc_read_soc(struct seq_file *seq, void *v)
+@@ -1142,20 +1138,18 @@ static int proc_read_soc(struct seq_file
+ 
+ 	tcpriv = (struct tc_priv *)seq->private;
+ 
+-#if 0
+ 	seq_printf(seq, "TXIN Base: 0x%08x, TXIN num: %d\n",
+-			tcpriv->cfg.txin_dbase,
+-			tcpriv->cfg.txin_dnum);
++			tcpriv->cfg.txin.soc_phydbase,
++			tcpriv->cfg.txin.soc_dnum);
+ 	seq_printf(seq, "TXOUT Base: 0x%08x, TXOUT num: %d\n",
+-			tcpriv->cfg.txout_dbase,
+-			tcpriv->cfg.txout_dnum);
++			tcpriv->cfg.txout.soc_phydbase,
++			tcpriv->cfg.txout.soc_dnum);
+ 	seq_printf(seq, "RXIN Base: 0x%08x, RXIN num: %d\n",
+-			tcpriv->cfg.rxin_dbase,
+-			tcpriv->cfg.rxin_dnum);
++			tcpriv->cfg.rxin.soc_phydbase,
++			tcpriv->cfg.rxin.soc_dnum);
+ 	seq_printf(seq, "RXOUT Base: 0x%08x, RXOUT num: %d\n",
+-			tcpriv->cfg.rxout_dbase,
+-			tcpriv->cfg.rxout_dnum);
+-#endif
++			tcpriv->cfg.rxout.soc_phydbase,
++			tcpriv->cfg.rxout.soc_dnum);
+ 
+ 	return 0;
+ }
+@@ -1165,15 +1159,13 @@ static int proc_read_soc_seq_open(struct
+ 	return single_open(file, proc_read_soc, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations tc_soc_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_soc_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops tc_soc_proc_fops = {
++	.proc_open	= proc_read_soc_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+-
+ static struct tc_proc_list tc_procs[] = {
+ 	{TC_PROC_DIR,	0,	NULL,			1},
+ 	{"cfg",		0644, &tc_cfg_proc_fops,	0},
+@@ -1241,13 +1233,12 @@ static int proc_read_ptm_wanmib_seq_open
+ 	return single_open(file, proc_read_ptm_wanmib, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations ptm_wanmib_proc_fops = {
+-	.owner      = THIS_MODULE,
+-	.open       = proc_read_ptm_wanmib_seq_open,
+-	.read       = seq_read,
+-	.write      = proc_write_ptm_wanmib,
+-	.llseek     = seq_lseek,
+-	.release    = single_release,
++static const struct proc_ops ptm_wanmib_proc_fops = {
++	.proc_open    = proc_read_ptm_wanmib_seq_open,
++	.proc_read    = seq_read,
++	.proc_write   = proc_write_ptm_wanmib,
++	.proc_lseek   = seq_lseek,
++	.proc_release = single_release,
+ };
+ 
+ static int proc_ptm_read_cfg(struct seq_file *seq, void *v)
+@@ -1300,7 +1291,7 @@ static ssize_t ptm_cfg_proc_write(struct
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!access_ok(VERIFY_READ, buf, count))
++	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+ 	len = count < sizeof(str) ? count : sizeof(str) - 1;
+@@ -1343,13 +1334,12 @@ proc_ptm_cfg_help:
+ }
+ 
+ 
+-static const struct file_operations ptm_cfg_proc_fops = {
+-	.owner = THIS_MODULE,
+-	.open = proc_read_cfg_seq_open,
+-	.read = seq_read,
+-	.llseek = seq_lseek,
+-	.write = ptm_cfg_proc_write,
+-	.release = single_release,
++static const struct proc_ops ptm_cfg_proc_fops = {
++	.proc_open = proc_read_cfg_seq_open,
++	.proc_read = seq_read,
++	.proc_lseek = seq_lseek,
++	.proc_write = ptm_cfg_proc_write,
++	.proc_release = single_release,
+ };
+ 
+ static ssize_t proc_ptm_write_prio(struct file *file, const char __user *buf,
+@@ -1455,13 +1445,12 @@ static int proc_ptm_read_prio_seq_open(s
+ 	return single_open(file, proc_ptm_read_prio, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations ptm_prio_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_ptm_read_prio_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.write		= proc_ptm_write_prio,
+-	.release	= single_release,
++static const struct proc_ops ptm_prio_proc_fops = {
++	.proc_open	= proc_ptm_read_prio_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_write	= proc_ptm_write_prio,
++	.proc_release	= single_release,
+ };
+ 
+ static int proc_ptm_read_bond_seq_open(struct inode *inode, struct file *file)
+@@ -1469,12 +1458,11 @@ static int proc_ptm_read_bond_seq_open(s
+ 	return single_open(file, proc_ptm_read_bond, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations ptm_bond_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_ptm_read_bond_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops ptm_bond_proc_fops = {
++	.proc_open	= proc_ptm_read_bond_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static int proc_ptm_read_bondmib_seq_open(struct inode *inode,
+@@ -1483,13 +1471,12 @@ static int proc_ptm_read_bondmib_seq_ope
+ 	return single_open(file, proc_ptm_read_bondmib, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations ptm_bondmib_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_ptm_read_bondmib_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.write		= proc_ptm_write_bondmib,
+-	.release	= single_release,
++static const struct proc_ops ptm_bondmib_proc_fops = {
++	.proc_open	= proc_ptm_read_bondmib_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_write	= proc_ptm_write_bondmib,
++	.proc_release	= single_release,
+ };
+ 
+ struct fwdbg_t {
+@@ -1910,14 +1897,14 @@ static int proc_read_fwdbg_seq_open(stru
+ {
+ 	return single_open(file, proc_read_fwdbg, NULL);
+ }
+-static const struct file_operations fwdbg_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_fwdbg_seq_open,
+-	.read		= seq_read,
+-	.write		= proc_write_fwdbg_seq,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops fwdbg_proc_fops = {
++	.proc_open	= proc_read_fwdbg_seq_open,
++	.proc_read	= seq_read,
++	.proc_write	= proc_write_fwdbg_seq,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
++
+ static struct tc_proc_list ptm_sl_procs[] = {
+ 	{TC_PROC_PTM_DIR, 0,	NULL,			1},
+ 	{"mem",		0644,	&mem_proc_fops,		0},
+@@ -2077,7 +2064,7 @@ static ssize_t atm_cfg_proc_write(struct
+ 
+ 	priv = (struct atm_priv *)PDE_DATA(file_inode(file));
+ 
+-	if (!access_ok(VERIFY_READ, buf, count))
++	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+ 	len = count < sizeof(str) ? count : sizeof(str) - 1;
+@@ -2119,13 +2106,12 @@ proc_atm_cfg_help:
+ 	return count;
+ }
+ 
+-static const struct file_operations atm_cfg_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_atm_cfg_seq_open,
+-	.read		= seq_read,
+-	.write		= atm_cfg_proc_write,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops atm_cfg_proc_fops = {
++	.proc_open	= proc_read_atm_cfg_seq_open,
++	.proc_read	= seq_read,
++	.proc_write	= atm_cfg_proc_write,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static ssize_t proc_write_atm_wanmib(struct file *file, const char __user *buf,
+@@ -2173,13 +2159,12 @@ static int proc_read_atm_wanmib_seq_open
+ 
+ 
+ 
+-static const struct file_operations atm_wanmib_proc_fops = {
+-	.owner      = THIS_MODULE,
+-	.open       = proc_read_atm_wanmib_seq_open,
+-	.read       = seq_read,
+-	.write      = proc_write_atm_wanmib,
+-	.llseek     = seq_lseek,
+-	.release    = single_release,
++static const struct proc_ops atm_wanmib_proc_fops = {
++	.proc_open    = proc_read_atm_wanmib_seq_open,
++	.proc_read    = seq_read,
++	.proc_write   = proc_write_atm_wanmib,
++	.proc_lseek   = seq_lseek,
++	.proc_release = single_release,
+ };
+ 
+ static int proc_read_htu_seq_open(struct inode *inode, struct file *file)
+@@ -2187,12 +2172,11 @@ static int proc_read_htu_seq_open(struct
+ 	return single_open(file, proc_read_htu, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations htu_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_htu_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops htu_proc_fops = {
++	.proc_open	= proc_read_htu_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static int proc_read_queue_seq_open(struct inode *inode, struct file *file)
+@@ -2200,12 +2184,11 @@ static int proc_read_queue_seq_open(stru
+ 	return single_open(file, proc_read_queue, PDE_DATA(inode));
+ }
+ 
+-static const struct file_operations queue_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_queue_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops queue_proc_fops = {
++	.proc_open	= proc_read_queue_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static void set_q_prio(struct atm_priv *priv,
+@@ -2428,13 +2411,12 @@ static const struct seq_operations pvc_m
+ 	.show = pvc_mib_seq_show,
+ };
+ 
+-static const struct file_operations atm_prio_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_atm_read_prio_seq_open,
+-	.read		= seq_read,
+-	.llseek		= seq_lseek,
+-	.write		= proc_atm_write_prio,
+-	.release	= single_release,
++static const struct proc_ops atm_prio_proc_fops = {
++	.proc_open	= proc_atm_read_prio_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_write	= proc_atm_write_prio,
++	.proc_release	= single_release,
+ };
+ 
+ static int proc_read_pvc_mib_seq_open(struct inode *inode, struct file *file)
+@@ -2447,12 +2429,11 @@ static int proc_read_pvc_mib_seq_open(st
+ 	return ret;
+ }
+ 
+-static const struct file_operations atm_pvc_mib_proc_fops = {
+-	.owner	= THIS_MODULE,
+-	.open	= proc_read_pvc_mib_seq_open,
+-	.read	= seq_read,
+-	.llseek	= seq_lseek,
+-	.release = seq_release,
++static const struct proc_ops atm_pvc_mib_proc_fops = {
++	.proc_open	= proc_read_pvc_mib_seq_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= seq_release,
+ };
+ 
+ static ssize_t proc_write_cell(struct file *file,
+@@ -2592,13 +2573,12 @@ static int proc_read_cell_seq_open(struc
+ 	return single_open(file, proc_read_cell, NULL);
+ }
+ 
+-static const struct file_operations atm_cell_proc_fops = {
+-	.owner		= THIS_MODULE,
+-	.open		= proc_read_cell_seq_open,
+-	.read		= seq_read,
+-	.write		= proc_write_cell,
+-	.llseek		= seq_lseek,
+-	.release	= single_release,
++static const struct proc_ops atm_cell_proc_fops = {
++	.proc_open	= proc_read_cell_seq_open,
++	.proc_read	= seq_read,
++	.proc_write	= proc_write_cell,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
+ };
+ 
+ static struct tc_proc_list atm_procs[] = {
diff --git a/package/kernel/lantiq/vrx518_tc/patches/200-swplat.patch b/package/kernel/lantiq/vrx518_tc/patches/200-swplat.patch
new file mode 100644
index 0000000000..793adefdd6
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/patches/200-swplat.patch
@@ -0,0 +1,1356 @@
+The existing software receive and transmit path does not actually work.
+This replaces it by a basic working implementation.
+
+--- a/dcdp/atm_tc.c
++++ b/dcdp/atm_tc.c
+@@ -603,7 +603,11 @@ static void atm_aca_init(struct atm_priv
+ 	cfg = &priv->tc_priv->cfg;
+ 
+ 	txin = &param.aca_txin;
++#if defined(__LITTLE_ENDIAN)
++	txin->byteswap = 0;
++#else
+ 	txin->byteswap = 1;
++#endif
+ 	txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
+ 	txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
+ 	txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
+@@ -625,7 +629,11 @@ static void atm_aca_init(struct atm_priv
+ 		txin->soc_cmlt_cnt_addr);
+ 
+ 	txout = &param.aca_txout;
++#if defined(__LITTLE_ENDIAN)
++	txout->byteswap = 0;
++#else
+ 	txout->byteswap = 1;
++#endif
+ 	txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
+ 	txout->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_OUT_PD_LIST_BASE);
+ 	txout->pd_desc_num = __ACA_TX_OUT_PD_LIST_NUM;
+@@ -647,7 +655,11 @@ static void atm_aca_init(struct atm_priv
+ 		txout->soc_cmlt_cnt_addr);
+ 
+ 	rxout = &param.aca_rxout;
++#if defined(__LITTLE_ENDIAN)
++	rxout->byteswap = 0;
++#else
+ 	rxout->byteswap = 1;
++#endif
+ 	rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
+ 	rxout->pd_desc_base = SB_XBAR_ADDR(__ACA_RX_OUT_PD_LIST_BASE);
+ 	rxout->pd_desc_num = __ACA_RX_OUT_PD_LIST_NUM;
+@@ -669,7 +681,11 @@ static void atm_aca_init(struct atm_priv
+ 		rxout->soc_cmlt_cnt_addr);
+ 
+ 	rxin = &param.aca_rxin;
++#if defined(__LITTLE_ENDIAN)
++	rxin->byteswap = 0;
++#else
+ 	rxin->byteswap = 1;
++#endif
+ 	rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
+ 	rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
+ 	rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
+@@ -1261,7 +1277,7 @@ static int ppe_ioctl(struct atm_dev *dev
+ static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ {
+ 	int ret, qid, mpoa_pt, mpoa_type, vid;
+-	unsigned int prio, conn;
++	unsigned int prio, conn, len;
+ 	struct atm_priv *priv;
+ 
+ 	if (!vcc) {
+@@ -1327,12 +1343,14 @@ static int ppe_send(struct atm_vcc *vcc,
+ 	tc_dbg(priv->tc_priv, MSG_TX, "vid: 0x%x, qid: 0x%x\n",
+ 			vid, qid);
+ 
++	len = skb->len;
++
+ 	if (priv->tc_priv->tc_ops.send(NULL,
+ 			skb, qid, ATM_SL_PKT) == 0) {
+ 		priv->stats.aal5_tx_pkts++;
+-		priv->stats.aal5_tx_bytes += skb->len;
++		priv->stats.aal5_tx_bytes += len;
+ 		priv->conn[conn].stats.aal5_tx_pkts++;
+-		priv->conn[conn].stats.aal5_tx_bytes += skb->len;
++		priv->conn[conn].stats.aal5_tx_bytes += len;
+ 		priv->conn[conn].prio_tx_packets[prio]++;
+ 	} else {
+ 		tc_dbg(priv->tc_priv, MSG_TX, "ATM: TX fail\n");
+--- a/dcdp/ptm_tc.c
++++ b/dcdp/ptm_tc.c
+@@ -497,6 +497,7 @@ static int ptm_xmit(struct sk_buff *skb,
+ 	struct ptm_priv *ptm_tc = netdev_priv(dev);
+ 	int qid;
+ 	enum tc_pkt_type type;
++	unsigned int len;
+ 
+ 	if (!showtime_stat(ptm_tc->tc_priv))
+ 		goto PTM_XMIT_DROP;
+@@ -510,11 +511,13 @@ static int ptm_xmit(struct sk_buff *skb,
+ 	type = ptm_tc->tc_priv->tc_mode == TC_PTM_BND_MODE
+ 		? PTM_BOND_PKT : PTM_SL_PKT;
+ 
++	len = skb->len;
++
+ 	if (ptm_tc->tc_priv->tc_ops.send(dev, skb, qid, type) < 0)
+ 		ptm_tc->stats64.tx_dropped++;
+ 	else {
+ 		ptm_tc->stats64.tx_packets++;
+-		ptm_tc->stats64.tx_bytes += skb->len;
++		ptm_tc->stats64.tx_bytes += len;
+ 	}
+ 
+ 	return 0;
+@@ -631,7 +634,7 @@ static int ptm_dev_init(struct tc_priv *
+ 	const char macaddr[ETH_ALEN]
+ 		= {0xAC, 0x9A, 0x96, 0x11, 0x22, 0x33};
+ 
+-	dev = alloc_netdev_mq(sizeof(*ptm_tc), "dsl%d",  NET_NAME_ENUM, ptm_setup, 4);
++	dev = alloc_netdev(sizeof(*ptm_tc), "dsl%d",  NET_NAME_ENUM, ptm_setup);
+ 	if (!dev) {
+ 		tc_dbg(tc_priv, MSG_INIT, "Cannot alloc net device\n");
+ 		return -ENOMEM;
+@@ -2324,7 +2327,11 @@ static void ptm_aca_init(struct ptm_ep_p
+ 	cfg = &priv->tc_priv->cfg;
+ 
+ 	txin = &param.aca_txin;
++#if defined(__LITTLE_ENDIAN)
++	txin->byteswap = 0;
++#else
+ 	txin->byteswap = 1;
++#endif
+ 	txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
+ 	txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
+ 	txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
+@@ -2347,7 +2354,11 @@ static void ptm_aca_init(struct ptm_ep_p
+ 		txin->soc_cmlt_cnt_addr);
+ 
+ 	txout = &param.aca_txout;
++#if defined(__LITTLE_ENDIAN)
++	txout->byteswap = 0;
++#else
+ 	txout->byteswap = 1;
++#endif
+ 	txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
+ 	if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
+ 		txout->pd_desc_base = sb_r32(__TX_OUT_SHADOW_PTR) - phybase;
+@@ -2373,7 +2384,11 @@ static void ptm_aca_init(struct ptm_ep_p
+ 		txout->soc_cmlt_cnt_addr);
+ 
+ 	rxout = &param.aca_rxout;
++#if defined(__LITTLE_ENDIAN)
++	rxout->byteswap = 0;
++#else
+ 	rxout->byteswap = 1;
++#endif
+ 	rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
+ 	if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
+ 		rxout->pd_desc_base = sb_r32(__RX_OUT_SHADOW_PTR) - phybase;
+@@ -2399,7 +2414,11 @@ static void ptm_aca_init(struct ptm_ep_p
+ 		rxout->soc_cmlt_cnt_addr);
+ 
+ 	rxin = &param.aca_rxin;
++#if defined(__LITTLE_ENDIAN)
++	rxin->byteswap = 0;
++#else
+ 	rxin->byteswap = 1;
++#endif
+ 	rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
+ 	rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
+ 	rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
+--- a/dcdp/platform/sw_plat.c
++++ b/dcdp/platform/sw_plat.c
+@@ -36,10 +36,13 @@
+ #include <linux/printk.h>
+ #include <linux/etherdevice.h>
+ #include <linux/workqueue.h>
+-#include "inc/dsl_tc.h"
++#include "../inc/dsl_tc.h"
+ 
+ #include "../inc/tc_main.h"
+ #include "../inc/reg_addr.h"
++#include "../inc/tc_common.h"
++
++#include "../inc/fw/vrx518_addr_def.h"
+ 
+ 
+ #define PMAC_SIZE		8
+@@ -70,7 +73,7 @@ enum {
+ #define TXIN_DNUM  	128
+ #define TXOUT_DNUM	128
+ #define RXOUT_DNUM	1024
+-#define RXIN_DNUM	1024
++#define RXIN_DNUM	0
+ 
+ #define TXIN_CHECK_NUM	32
+ 
+@@ -80,22 +83,32 @@ struct aca_ring {
+ 	void *umt_dst;
+ 	u32 umt_phydst;
+ 	u32 dnum;
++	u32 dsize;
+ 	int idx; /* SoC RX/TX index */
+-	int cnt;
+-	void *cnt_addr;
+-	u32 cnt_phyaddr;
+ 	int ep_dev_idx;
+ };
+ 
++struct tx_list_item {
++	size_t len;
++	void *buf;
++	dma_addr_t phyaddr;
++};
++
++struct tx_list {
++	struct tx_list_item *data;
++	u32 dnum;
++};
++
+ struct aca_ring_grp {
+ 	struct aca_ring rxin;
+ 	struct aca_ring txin;
+ 	struct aca_ring rxout;
+ 	struct aca_ring txout;
++	struct tx_list txlist;
+ };
+ 
+-#if 1
+-struct dma_desc {
++#if defined(__LITTLE_ENDIAN)
++struct dma_tx_desc {
+ 	/* DW 0 */
+ 	u32 qid;
+ 	/* DW 1 */
+@@ -112,8 +125,26 @@ struct dma_desc {
+ 	u32 c:1;
+ 	u32 own:1;
+ }__packed;
++
++struct dma_rx_desc {
++	/* DW 0 */
++	u32 qid;
++	/* DW 1 */
++	u32 res2;
++	/* DW 2 */
++	u32 data_len:16;
++	u32 res0:7;
++	u32 byte_off:3;
++	u32 res1:2;
++	u32 eop:1;
++	u32 sop:1;
++	u32 c:1;
++	u32 own:1;
++	/* DW 3 */
++	u32 data_ptr;
++}__packed;
+ #else
+-struct dma_desc {
++struct dma_tx_desc {
+ 	/* DW 0 */
+ 	u32 qid;
+ 	/* DW 1 */
+@@ -131,14 +162,25 @@ struct dma_desc {
+ 	u32 data_len:16;
+ }__packed;
+ 
++struct dma_rx_desc {
++	/* DW 0 */
++	u32 qid;
++	/* DW 1 */
++	u32 res;
++	/* DW 2 */
++	u32 own:1;
++	u32 c:1;
++	u32 sop:1;
++	u32 eop:1;
++	u32 res1:2;
++	u32 byte_off:3;
++	u32 res0:7;
++	u32 data_len:16;
++	/* DW 3 */
++	u32 data_ptr;
++}__packed;
+ #endif
+ 
+-struct plat_dma {
+-	u32 chan; /* CHAN IID */
+-	u32 dma_chan; /* CONTROLLER/PORT/CHAN ID */
+-	u32 ds_dnum; /* DS descriptor number */
+-};
+-
+ struct plat_umt {
+ 	u32 id;
+ 	u32 cbm_id;
+@@ -152,28 +194,28 @@ struct tc_req {
+ 	enum dsl_tc_mode tc_mode;
+ };
+ 
+-#if 0
+-struct tc_coc {
+-	enum ltq_cpufreq_state coc_stat;
+-	struct tasklet_struct coc_task;
++struct mem_map_entry {
++	dma_addr_t phyaddr;
++	void *mem;
++	size_t size;
++	struct hlist_node node;
+ };
+-#endif
+ 
+ struct plat_priv {
+ 	struct tc_priv *tc_priv;
+ 	struct plat_umt umt[EP_MAX_NUM];
+-	struct plat_dma dma[EP_MAX_NUM];
+ 	struct ltq_mei_atm_showtime_info dsl_ops;
+ 	struct tc_req req_work;
+ 	struct aca_ring_grp soc_rings;
+-	/* struct tc_coc coc;*/
++	struct net_device *netdev;
++	DECLARE_HASHTABLE(mem_map, 8);
+ };
+ 
+ static struct plat_priv *g_plat_priv;
+ struct tasklet_struct txout_task;
+ struct tasklet_struct rxout_task;
+ 
+-static void txout_action(struct tc_priv *priv, struct aca_ring *txout);
++static DEFINE_SPINLOCK(tx_spinlock);
+ 
+ void *ppa_callback_get(e_ltq_mei_cb_type type)
+ {
+@@ -259,122 +301,65 @@ static inline struct tc_priv *plat_to_tc
+ 	return g_plat_priv->tc_priv;
+ }
+ 
+-static int32_t plat_rx(struct net_device *rxdev, struct net_device *txdev,
+-	struct sk_buff *skb, int32_t len)
+-{
+-	int32_t err;
+-	struct tc_priv *tc_priv = plat_to_tcpriv();
+-
+-	if (unlikely(!rxdev)) {
+-		if (txdev != NULL)
+-			tc_dbg(tc_priv, MSG_RX,
+-				"Recv undelivered packet from DP lib\n");
+-		else
+-			tc_dbg(tc_priv, MSG_RX, "Recv unknown packet\n");
+-		err = -ENODEV;
+-		goto err1;
+-	}
+-
+-	tc_priv->tc_ops.recv(rxdev, skb);
+-	return 0;
+-
+-err1:
+-	dev_kfree_skb_any(skb);
+-
+-	return err;
+-}
+-
+-#if 0
+-static int32_t plat_get_subifid(struct net_device *dev, struct sk_buff *skb,
+-	void *subif_data, uint8_t dst_mac[MAX_ETH_ALEN],
+-	dp_subif_t *subif, uint32_t flags)
+-{
+-	int qid;
+-	struct tc_priv *priv = plat_to_tcpriv();
+-
+-	qid = priv->tc_ops.get_qid(dev, skb, subif_data, flags);
+-	if (qid < 0)
+-		return qid;
+-	else
+-		subif->subif = qid;
+-
+-	return 0;
+-}
+-#endif
+-
+-#if 0
+-static void plat_coc_tasklet(unsigned long arg)
+-{
+-	/* change state to D0 */
+-	if (g_plat_priv->coc.coc_stat == LTQ_CPUFREQ_PS_D0)
+-		return;
+-
+-	g_plat_priv->coc.coc_stat = LTQ_CPUFREQ_PS_D0;
+-}
+-
+-static void plat_coc_req(void)
+-{
+-	tasklet_schedule(&g_plat_priv->coc.coc_task);
+-}
++static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr);
++static void *plat_mem_virt(u32 phyaddr);
++static void plat_mem_free(u32 phyaddr, enum tc_dir dir);
+ 
++static void txlist_free(struct tx_list *list);
+ 
+-static int32_t plat_coc_stat(enum ltq_cpufreq_state new_state,
+-	enum ltq_cpufreq_state old_state, uint32_t flags)
++static int txlist_init(struct tx_list *list, u32 dnum)
+ {
+-	struct tc_priv *priv = plat_to_tcpriv();
+-	tc_dbg(priv, MSG_COC,
+-		"COC current state: %d, new state: %d, old state: %d\n",
+-		g_plat_priv->coc.coc_stat, new_state, old_state);
++	struct tx_list_item *item;
++	int i;
+ 
+-	if (g_plat_priv->coc.coc_stat != new_state) {
+-		g_plat_priv->coc.coc_stat = new_state;
++	list->dnum = dnum;
+ 
+-		if (new_state == LTQ_CPUFREQ_PS_D3) {
+-			/* Enable interrupt for DS packet */
+-			priv->tc_ops.irq_on(MBOX_PKT_RX);
+-		} else {
+-			/* Disable interrupt for DS packet */
+-			priv->tc_ops.irq_off(MBOX_PKT_RX);
++	list->data = kcalloc(dnum, sizeof(struct tx_list_item), GFP_KERNEL);
++	if (!list->data) {
++		pr_err("Failed to allocate TX list!\n");
++		goto err;
++	}
++
++	for (i = 0; i < list->dnum; i++) {
++		item = &list->data[i];
++
++		// use plat_mem_alloc as these buffers will be mixed with buffers allocated in ptm_tc.c / atm_tc.c
++		item->buf = plat_mem_alloc(DMA_PACKET_SZ, US_DIR, &item->phyaddr);
++		if (!item->buf) {
++			pr_err("Failed to allocate TX buffer!\n");
++			goto err;
+ 		}
+ 	}
+ 
+ 	return 0;
+-}
+-#endif
+-
+-static inline int ring_dist(int idx1, int idx2, int size)
+-{
+-	if (idx1 >= idx2)
+-		return (idx1 - idx2);
+-	else
+-		return (idx1 + size - idx2);
+-}
+ 
+-static inline int __ring_full(int idx, int cnt, u32 dnum)
+-{
+-	if (ring_dist(idx, cnt, dnum) < dnum - 1)
+-		return 0;
+-	else
+-		return 1;
++err:
++	txlist_free(list);
++	return -1;
+ }
+ 
+-static inline int ring_full(struct aca_ring *ring)
++static void txlist_free(struct tx_list *list)
+ {
+-	if (!__ring_full(ring->idx, ring->cnt, ring->dnum))
+-		return 0;
++	struct tx_list_item *item;
++	int i;
+ 
+-	/* if ring full, update cumulative counter and check again */
+-	ring->cnt = readl(ring->cnt_addr) % ring->dnum;
++	if (list->data) {
++		for (i = 0; i < list->dnum; i++) {
++			item = &list->data[i];
++
++			if (item->buf) {
++				// use plat_mem_free as these buffers are mixed with buffers allocated in ptm_tc.c / atm_tc.c
++				plat_mem_free(item->phyaddr, US_DIR);
++			}
++		}
++	}
+ 
+-	return __ring_full(ring->idx, ring->cnt, ring->dnum);
++	kfree(list->data);
+ }
+ 
+-#define ring_idx_inc(ring, idx)						\
+-	do { ring->idx = (ring->idx + 1) % ring->dnum; } while (0);
+-
+-static inline void ring_cnt_update(struct aca_ring *ring)
++static inline void ring_idx_inc(struct aca_ring *ring)
+ {
+-	ring->cnt = readl(ring->cnt_addr) % ring->dnum;
++	ring->idx = (ring->idx + 1) % ring->dnum;
+ }
+ 
+ static struct sk_buff *txin_skb_prepare(struct sk_buff *skb)
+@@ -399,252 +384,220 @@ static struct sk_buff *txin_skb_prepare(
+ 	return nskb;
+ }
+ 
+-static int ring_mmap(void *mem, int size,
+-	enum dma_data_direction dir, u32 *addr)
+-{
+-	struct device *pdev;
+-	dma_addr_t phy_addr;
+-	struct tc_priv *priv;
+-	u32 addr1;
+-
+-	priv = g_plat_priv->tc_priv;
+-	pdev = priv->ep_dev[0].dev;
+-
+-	phy_addr = dma_map_single(pdev, mem, size, dir);
+-	if (unlikely(dma_mapping_error(pdev, phy_addr))) {
+-		tc_err(priv, MSG_INIT,
+-			"DMA address mapping error: buf: 0x%x, size: %d, dir: %d\n",
+-			(u32)mem, size, dir);
+-		return -ENOMEM;
+-	}
+-	dma_unmap_single(pdev, phy_addr, size, dir);
+-
+-	pr_info("vaddr: 0x%x, phyaddr: 0x%lx\n", (u32)mem, phy_addr);
+-	addr1 = (u32)phy_addr;
+-
+-	if (addr)
+-		*addr = addr1;
+-
+-	return 0;
+-}
+-
+-static void txin_action(struct tc_priv *priv, struct aca_ring *txin,
++static int txin_action(struct tc_priv *priv, struct aca_ring *txin,
+ 		struct sk_buff *skb, int qid, enum tc_pkt_type type)
+ {
+-	struct dma_desc *desc, desc1;
+-	u32 phyaddr, *dst, *src;
+-	int i;
++	struct device *pdev = priv->ep_dev[0].dev;
++	struct aca_ring *txout = &g_plat_priv->soc_rings.txout;
++	struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
++	struct dma_tx_desc *desc;
++	struct tx_list_item *txlist_item;
++	unsigned long flags;
++
++	if (!g_plat_priv->netdev) {
++		spin_lock_irqsave(&tx_spinlock, flags);
++	}
+ 
+-	if (ring_full(txin)) {
+-		tc_dbg(priv, MSG_TX,
+-			"TXIN Ring Full!: idx: %d, cnt: %d\n",
+-			txin->idx, txin->cnt);
++	if ((txin->idx + 2) % txin->dnum == txout->idx) {
++		if (g_plat_priv->netdev) {
++			netif_stop_queue(g_plat_priv->netdev);
++		}
++	} else if ((txin->idx + 1) % txin->dnum == txout->idx) {
++		tc_err(priv, MSG_TX, "TXIN ring full: txin: %d, txout: %d\n",
++			txin->idx, txout->idx);
+ 		goto err1;
+ 	}
+ 
++	desc = (struct dma_tx_desc *)txin->dbase_mem;
++	desc += txin->idx;
++
++	txlist_item = &txlist->data[txin->idx];
++
+ 	skb = txin_skb_prepare(skb);
+ 	if (!skb)
+-		return;
++		goto err2;
+ 
+-	if (ring_mmap(skb->data, skb->len, DMA_TO_DEVICE, &phyaddr) < 0) {
+-		tc_err(priv, MSG_TX, "TXIN data mmap failed: 0x%x\n",
+-			(unsigned int)skb->data);
+-		goto err1;
+-	}
++	/*
++	 * Copy the data to a buffer in the driver. This is necessary because there doesn't seem to be a timely signal
++	 * from the device when it has consumed a buffer, which would allow to safely free it. The data_ptr is only
++	 * returned in TXOUT after another fixed number of packets (depending on the size of internal buffers) has been
++	 * transmitted, which may not happen in the near future. Making a copy allows to free the SKB here.
++	 */
++	memcpy(txlist_item->buf, skb->data, skb->len);
+ 
+-	/* init a new descriptor for the new skb */
+-	desc = (struct dma_desc *)txin->dbase_mem;
+-	desc += txin->idx;
++	dma_sync_single_range_for_device(pdev, txlist_item->phyaddr, 0, skb->len, DMA_TO_DEVICE);
+ 
+-	memset(desc, 0, sizeof(*desc));
+-	memset(&desc1, 0, sizeof(desc1));
+-	desc1.own = 1;
+-	desc1.c = 1;
+-	desc1.sop = 1;
+-	desc1.eop = 1;
+-	desc1.byte_off = phyaddr & 0x7;
+-	desc1.data_len = skb->len;
+-
+-	desc1.data_ptr = phyaddr & (~(0x7));
+-	desc1.qid = qid;
+-
+-	dst = (u32 *)desc;
+-	src = (u32 *)&desc1;
+-	for (i = 0; i < DW_SZ(desc1); i++)
+-		dst[i] = cpu_to_be32(src[i]);
+-
+-	pr_info("txin idx: %d\n", txin->idx);
+-	pr_info("descriptor dst val:(DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
+-		dst[0], dst[1], dst[2], dst[3]);
+-	pr_info("descriptor src val: (DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
+-		src[0], src[1], src[2], src[3]);
+-
+-	if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
+-		tc_err(priv, MSG_TX, "TXIN descriptor mmap failed: 0x%x\n",
+-			(unsigned int)desc);
++	// this should never happen, the buffers are already aligned by kmalloc
++	if (WARN_ON((txlist_item->phyaddr & 0x7) != 0))
+ 		goto err1;
++
++	if (g_plat_priv->netdev) {
++		netdev_sent_queue(g_plat_priv->netdev, skb->len);
+ 	}
++	txlist_item->len = skb->len;
++
++	memset(desc, 0, sizeof(*desc));
+ 
+-	ring_idx_inc(txin, idx);
++	desc->data_ptr = txlist_item->phyaddr;
++	desc->byte_off = 0;
++	desc->data_len = skb->len;
++	desc->qid = qid;
++
++	desc->sop = 1;
++	desc->eop = 1;
++	desc->c = 0;
++	desc->own = 1;
++
++	dev_consume_skb_any(skb);
++
++	ring_idx_inc(txin);
+ 
+ 	/* update TXIN UMT by 1 */
+ 	writel(1, txin->umt_dst);
+-	pr_info("TXIN send txin packet 1 packet\n");
+ 
+-	/* Free skb */
+-	dev_kfree_skb_any(skb);
++	if (!g_plat_priv->netdev) {
++		spin_unlock_irqrestore(&tx_spinlock, flags);
++	}
+ 
+-	/* check txout for testing*/
+-	//txout_action(plat_to_tcpriv(), &g_plat_priv->soc_rings.txout);
+-	return;
++	return 0;
+ 
+ err1:
+-	//skb->delay_free = 0;
+ 	dev_kfree_skb_any(skb);
++
++err2:
++	if (!g_plat_priv->netdev) {
++		spin_unlock_irqrestore(&tx_spinlock, flags);
++	}
++
++	return -1;
+ }
+ 
+ static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
+ {
+-	int i, cnt;
+-	struct dma_desc *desc;
+-	u32 ptr;
+-	void *mem;
+-
+-	ring_cnt_update(txout);
+-	cnt = ring_dist(txout->idx, txout->cnt, txout->dnum);
++	struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
++	struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
++	struct tx_list_item *txlist_item;
++	int i, cnt, bytes;
++	u32 *desc;
++	unsigned long flags;
++
++	cnt = 0;
++	bytes = 0;
++
++	if (g_plat_priv->netdev) {
++		netif_tx_lock(g_plat_priv->netdev);
++	} else {
++		spin_lock_irqsave(&tx_spinlock, flags);
++	}
+ 
+-	for (i = 0; i < cnt; i++) {
++	for (i = 0; i < txout->dnum; i++) {
+ 		desc = txout->dbase_mem;
+ 		desc += txout->idx;
+-		/* read from memory */
+-		if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
+-			tc_err(priv, MSG_TX,
+-				"map TXOUT DMA descriptor failed\n");
+-			continue;
++
++		// *desc seems to be a pointer to a QoSQ buffer or the data_ptr of some previously sent packet
++		if (*desc == 0) {
++			break;
+ 		}
+-		ptr = desc->data_ptr + desc->byte_off;
+-		mem = (void * __force)__va(ptr);
+-		kfree(mem);
+-		ring_idx_inc(txout, idx);
+-	}
+ 
+-	if (cnt)
+-		writel(cnt, txout->umt_dst);
+-	pr_info("TXOUT received %d descriptors\n", cnt);
+-}
++		if (txout->idx == txin->idx) {
++			tc_err(priv, MSG_TX, "TXOUT unexpected non-zero descriptor: txin: %d, txout: %d\n",
++				txin->idx, txout->idx);
++			break;
++		}
+ 
+-static void rxin_action(struct tc_priv *priv,
+-		struct aca_ring *rxin, int size, int cnt)
+-{
+-	int i, dist;
+-	struct dma_desc *desc;
+-	void *data_ptr;
+-	u32 phyaddr;
+-
+-	if (ring_full(rxin)) {
+-		tc_dbg(priv, MSG_RX,
+-			"RXIN Ring Full!: idx: %d, cnt: %d\n",
+-			rxin->idx, rxin->cnt);
+-		return;
+-	}
++		txlist_item = &txlist->data[txout->idx];
+ 
+-	dist = ring_dist(rxin->idx, rxin->cnt, rxin->dnum);
+-	if (cnt > dist) {
+-		WARN_ONCE(1, "RXIN NO enough room for free buffers: free: %d, room: %d\n",
+-			cnt, dist);
+-		cnt = dist;
++		cnt++;
++		bytes += txlist_item->len;
++
++		/*
++		 * Reuse the returned buffer. The previous buffer should still be referenced by another descriptor.
++		 * When the driver is unloaded, all buffers in the txlist as well as those referenced by the
++		 * descriptors managed in ptm_tc.c or atm_tc.c will be freed.
++		 */
++		txlist_item->buf = plat_mem_virt(*desc);
++		txlist_item->phyaddr = *desc;
++
++		*desc = 0;
++
++		ring_idx_inc(txout);
+ 	}
+ 
+-	for (i = 0; i < cnt; i++) {
+-		data_ptr = kmalloc(size, GFP_ATOMIC);
+-		if (!data_ptr) {
+-			tc_err(priv, MSG_RX,
+-				"RXIN kmalloc data buffer failed: %d\n", size);
+-			goto err1;
+-		}
++	if (cnt) {
++		writel(cnt, txout->umt_dst+0x28); // TXOUT_HD_ACCUM_SUB instead of TXOUT_HD_ACCUM_ADD
+ 
+-		if (ring_mmap(data_ptr, size, DMA_FROM_DEVICE, &phyaddr) < 0) {
+-			tc_err(priv, MSG_RX,
+-				"RXIN kmalloc data buffer failed: %d\n", size);
+-			goto err2;
++		if (g_plat_priv->netdev) {
++			netdev_completed_queue(g_plat_priv->netdev, cnt, bytes);
+ 		}
++	}
+ 
+-		desc = (struct dma_desc *)rxin->dbase_mem;
+-		desc += rxin->idx;
+-		memset(desc, 0, sizeof(*desc));
+-
+-		desc->data_len = size;
+-		desc->byte_off = phyaddr & 0x7;
+-		desc->eop = 1;
+-		desc->sop = 1;
+-		desc->own = 1;
+-
+-		desc->data_ptr = phyaddr;
+-
+-		
+-		if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
+-			tc_err(priv, MSG_RX, "RXIN descriptor mmap failed: 0x%x\n",
+-				(unsigned int)desc);
+-			goto err2;
+-		}
+-		
+-		ring_idx_inc(rxin, idx);
++	if (g_plat_priv->netdev) {
++		netif_tx_unlock(g_plat_priv->netdev);
++	} else {
++		spin_unlock_irqrestore(&tx_spinlock, flags);
+ 	}
+ 
+-	/* update RXIN UMT*/
+-	writel(i, rxin->umt_dst);
+-	pr_info("rxin refill %d descriptors\n", i);
+-	return;
++	if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
++		netif_wake_queue(g_plat_priv->netdev);
++	}
++}
+ 
+-err2:
+-	kfree(data_ptr);
+-err1:
+-	if (i)
+-		writel(i, rxin->umt_dst);
+-	return;
++static void rxin_action(struct tc_priv *priv,
++		struct aca_ring *rxin, int size, int cnt)
++{
++	/* update RXIN UMT*/
++	writel(cnt, rxin->umt_dst);
+ }
+ 
+ static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
+ {
++	struct device *pdev = priv->ep_dev[0].dev;
+ 	int i, cnt;
+-	struct dma_desc *desc;
+-	u32 ptr;
+-	void *mem;
++	struct dma_rx_desc *desc;
++	dma_addr_t phyaddr;
++	void *ptr, *dst;
++	size_t len;
+ 	struct sk_buff *skb;
+ 
+-	ring_cnt_update(rxout);
+-	cnt = ring_dist(rxout->idx, rxout->cnt, rxout->dnum);
+-
+-	for (i = 0; i < cnt; i++) {
++	cnt = 0;
++	for (i = 0; i < rxout->dnum; i++) {
+ 		desc = rxout->dbase_mem;
+ 		desc += rxout->idx;
+ 
+-		/* read from memory */
+-		if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
+-			tc_err(priv, MSG_RX,
+-				"map RXOUT DMA descriptor failed\n");
+-			continue;
++		if (!desc->own) {
++			break;
+ 		}
+-		ptr = desc->data_ptr + desc->byte_off;
+-		mem = __va(ptr);
+-		skb = build_skb(mem, 0);
+-		if (!skb) {
+-			tc_err(priv, MSG_RX,
+-				"RXOUT build skb failed\n");
+-			kfree(mem);
+-			continue;
++
++		// this seems to be a pointer to a DS PKT buffer
++		phyaddr = desc->data_ptr + desc->byte_off;
++		ptr = plat_mem_virt(phyaddr);
++
++		len = desc->data_len;
++
++		dma_sync_single_range_for_cpu(pdev, phyaddr, 0, len, DMA_FROM_DEVICE);
++
++		skb = netdev_alloc_skb(g_plat_priv->netdev, len);
++		if (unlikely(!skb)) {
++			tc_err(priv, MSG_RX, "RXOUT SKB allocation failed\n");
++			break;
+ 		}
+-		priv->tc_ops.recv(NULL, skb);
+-		ring_idx_inc(rxout, idx);
++
++		dst = skb_put(skb, len);
++		memcpy(dst, ptr, len);
++
++		priv->tc_ops.recv(g_plat_priv->netdev, skb);
++
++		desc->own = 0;
++
++		cnt++;
++		ring_idx_inc(rxout);
+ 	}
+ 
+ 	if (!cnt)
+-		tc_err(priv, MSG_RX, "RXOUT dummy interrupt: dbase: 0x%x, idx: %d, cnt: %d\n",
+-			(unsigned int)rxout->dbase_mem, rxout->idx, rxout->cnt);
++		tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
+ 	else
+-		writel(cnt, rxout->umt_dst);
++		writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD
+ 
+-	pr_info("txout received %d packets\n", cnt);
+ 	return cnt;
+ }
+ 
+@@ -669,7 +622,6 @@ static void plat_rxout_tasklet(unsigned
+ 	struct aca_ring *rxin = &priv->soc_rings.rxin;
+ 	struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
+ 	int cnt;
+-	
+ 
+ 	cnt = rxout_action(tcpriv, rxout);
+ 	if (cnt)
+@@ -687,68 +639,144 @@ static int plat_send(struct net_device *
+ {
+ 	struct plat_priv *priv = g_plat_priv;
+ 	struct aca_ring *txin = &priv->soc_rings.txin;
++	int res;
+ 
+-	txin_action(priv->tc_priv, txin, skb, qid, type);
++	res = txin_action(priv->tc_priv, txin, skb, qid, type);
+ 
+-	return 0;
++	return res;
++}
++
++static void plat_mem_init(void)
++{
++	struct plat_priv *priv = g_plat_priv;
++
++	hash_init(priv->mem_map);
+ }
+ 
+ /* return virtual address */
+-static void *plat_mem_alloc(size_t size, enum tc_dir dir)
++static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr)
+ {
+-	return kmalloc(size, GFP_KERNEL);
++	struct plat_priv *priv = g_plat_priv;
++	struct tc_priv *tcpriv = priv->tc_priv;
++	struct device *pdev = tcpriv->ep_dev[0].dev;
++	enum dma_data_direction dma_dir;
++	struct mem_map_entry *entry;
++
++	entry = kzalloc(sizeof(struct mem_map_entry), GFP_KERNEL);
++	if (!entry)
++		goto err;
++
++	entry->size = size;
++
++	entry->mem = kmalloc(size, GFP_KERNEL);
++	if (!entry->mem)
++		goto err_alloc;
++
++	dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++
++	entry->phyaddr = dma_map_single(pdev, entry->mem, entry->size, dma_dir);
++	if (unlikely(dma_mapping_error(pdev, entry->phyaddr))) {
++		tc_err(priv, MSG_INIT,
++			"plat_mem_alloc: DMA mapping error: buf: 0x%x, size: %d, dir: %d\n",
++			(u32)entry->mem, size, dir);
++
++		goto err_map;
++	}
++
++	hash_add(g_plat_priv->mem_map, &entry->node, entry->phyaddr);
++
++	*phyaddr = entry->phyaddr;
++	return entry->mem;
++
++err_map:
++	kfree(entry->mem);
++
++err_alloc:
++	kfree(entry);
++
++err:
++	return NULL;
+ }
+ 
+-static void plat_mem_free(u32 phy_addr, enum tc_dir dir)
++static void *plat_mem_virt(u32 phyaddr)
+ {
+-	void *mem;
++	struct mem_map_entry *entry;
++
++	hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
++		if (entry->phyaddr == phyaddr)
++			return entry->mem;
++
++	WARN_ON(1);
++	return NULL;
++}
++
++static struct mem_map_entry *plat_mem_entry(u32 phyaddr)
++{
++	struct mem_map_entry *entry;
++
++	hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
++		if (entry->phyaddr == phyaddr)
++			return entry;
+ 
+-	mem = (void * __force)__va(phy_addr);
+-	kfree(mem);
++	return NULL;
+ }
+ 
+-static void aca_soc_ring_init(struct tc_priv *priv,
+-			struct aca_ring *ring, u32 dnum, u32 dsize)
++static void plat_mem_free(u32 phyaddr, enum tc_dir dir)
+ {
++	struct tc_priv *priv = g_plat_priv->tc_priv;
++	struct device *pdev = priv->ep_dev[0].dev;
++	enum dma_data_direction dma_dir;
++	struct mem_map_entry *entry;
++
++	entry = plat_mem_entry(phyaddr);
++	if (WARN_ON(!entry))
++		return;
++
++	hash_del(&entry->node);
++
++	dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++	dma_unmap_single(pdev, entry->phyaddr, entry->size, dma_dir);
++
++	kfree(entry->mem);
++
++	kfree(entry);
++}
++
++static int ring_init(struct tc_priv *priv, struct aca_ring *ring, u32 dnum, u32 dsize)
++{
++	struct device *pdev = priv->ep_dev[0].dev;
+ 	int size;
+-	struct device *pdev;
+ 
+ 	memset(ring, 0, sizeof(*ring));
+ 	ring->dnum = dnum;
++	ring->dsize = dsize;
++
++	if (ring->dnum == 0) {
++		return 0;
++	}
++
+ 	size = dsize * dnum;
+-	pdev = priv->ep_dev[0].dev;
+ 
+-	ring->dbase_mem = kmalloc(size, GFP_KERNEL);
++	ring->dbase_mem = dma_alloc_coherent(pdev, size, &(ring->dbase_phymem), GFP_KERNEL);
+ 	if (!ring->dbase_mem) {
+-		tc_err(priv, MSG_INIT, "Allocate SoC Ring fail: %d\n", dnum);
+-		return;
++		tc_err(priv, MSG_INIT, "Ring allocation failed: %d\n", dnum);
++		return -1;
+ 	}
+ 
+-	ring_mmap(ring->dbase_mem, size, DMA_FROM_DEVICE, &(ring->dbase_phymem));
+-	tc_dbg(priv, MSG_INIT, "ring: membase: 0x%x, phybase: 0x%x, dnum: %d\n",
+-		(u32)ring->dbase_mem, ring->dbase_phymem, ring->dnum);
+-
+-	size = sizeof(u32);
+-	ring->cnt_addr = kzalloc(size, GFP_KERNEL);
+-	if (!ring->cnt_addr) {
+-		tc_err(priv, MSG_INIT, "Allocate cumulative counter fail!\n");
+-		return;
+-	}
++	return 0;
++}
+ 
+-	ring_mmap(ring->cnt_addr, size, DMA_TO_DEVICE, &(ring->cnt_phyaddr));
+-	tc_dbg(priv, MSG_INIT, "ring: cumulative cnt addr: 0x%x, phy address: 0x%x\n",
+-		(u32)ring->cnt_addr, ring->cnt_phyaddr);
++#define ring_dnum(tcpriv, name1, name2) ((!tcpriv->param.name1##_dnum) ? name2##_DNUM : tcpriv->param.name1##_dnum)
+ 
+-	return;
+-}
++static void ring_free(struct tc_priv *priv, struct aca_ring *ring)
++{
++	struct device *pdev = priv->ep_dev[0].dev;
+ 
+-#define ring_init(tcpriv, ring, name1, name2, num, size)	\
+-{								\
+-	if (!tcpriv->param.name1##_dnum)			\
+-		num = name2##_DNUM;				\
+-	else							\
+-		num = tcpriv->param.name1##_dnum;		\
+-	aca_soc_ring_init(tcpriv, ring, num, size);		\
++	if (ring->dnum == 0) {
++		return;
++	}
++
++	dma_free_coherent(pdev, ring->dsize * ring->dnum, ring->dbase_mem, ring->dbase_phymem);
+ }
+ 
+ static irqreturn_t aca_rx_irq_handler(int irq, void *dev_id)
+@@ -777,39 +805,55 @@ static irqreturn_t aca_tx_irq_handler(in
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void irq_init(struct tc_priv *priv, const char *dev_name)
++static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
+ {
+ 	int ret;
+ 	int i;
+-	char name[IFNAMSIZ];
++	//char name[IFNAMSIZ];
+ 
+ 	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
+-		sprintf(name, "%s%d", dev_name, i);
++		//snprintf(name, sizeof(name), "aca-rxo%d", i);
+ 
+ 		ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq,
+-				aca_rx_irq_handler, 0, name, &priv->ep_dev[i]);
++				aca_rx_irq_handler, 0, "aca-rxo", &priv->ep_dev[i]);
+ 
+ 		if (ret) {
+ 			tc_err(priv, MSG_INIT,
+ 				"ACA RX IRQ request Fail!: irq: %d, ep_id: %d\n",
+ 				priv->ep_dev[i].aca_rx_irq, i);
+ 			//return;
+-		} 
++		}
++
++		//snprintf(name, sizeof(name), "aca-txo%d", i);
+ 
+ 		ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq,
+-				aca_tx_irq_handler, 0, name, &priv->ep_dev[i]);
++				aca_tx_irq_handler, 0, "aca-txo", &priv->ep_dev[i]);
+ 
+ 		if (ret) {
+ 			tc_err(priv, MSG_INIT,
+ 				"ACA TX IRQ request Fail!: irq: %d, ep_id: %d\n",
+ 				priv->ep_dev[i].aca_tx_irq, i);
+ 			//return;
+-		} 
++		}
+ 	}
+ 
+ 	return;
+ }
+ 
++static void plat_irq_free(struct tc_priv *priv)
++{
++	int i;
++
++	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
++
++		/* Unregister RX irq handler */
++		devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq, &priv->ep_dev[i]);
++
++		/* Unregister TX irq handler */
++		devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq, &priv->ep_dev[i]);
++	}
++}
++
+ /**
+  * Decide txin/rxout queue size
+  * Create a tx/rx queue
+@@ -819,29 +863,68 @@ static int plat_dp_init(struct plat_priv
+ 	struct tc_priv *tcpriv;
+ 	struct aca_ring_grp *soc_rings;
+ 	struct aca_ring *ring;
+-	int size;
+ 	u32 dnum;
++	int i;
++	int ret = 0;
+ 
+ 	tcpriv = priv->tc_priv;
+ 
+-	size = sizeof(struct dma_desc);
++	plat_mem_init();
++
+ 	soc_rings = &priv->soc_rings;
+ 
+ 	/* txin ring */
+ 	ring = &soc_rings->txin;
+-	ring_init(tcpriv, ring, txin, TXIN, dnum, size);
++	dnum = ring_dnum(tcpriv, txin, TXIN);
++	ret = txlist_init(&soc_rings->txlist, dnum);
++	if (ret < 0)
++		goto err5;
++	ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_tx_desc));
++	if (ret < 0)
++		goto err4;
+ 
+ 	/* txout ring */
+ 	ring = &soc_rings->txout;
+-	ring_init(tcpriv, ring, txout, TXOUT, dnum, size);
++	dnum = ring_dnum(tcpriv, txout, TXOUT);
++	ret = ring_init(tcpriv, ring, dnum, sizeof(u32));
++	if (ret < 0)
++		goto err3;
++
+ 	/* rxin ring */
+ 	ring = &soc_rings->rxin;
+-	ring_init(tcpriv, ring, rxin, RXIN, dnum, size);
++	dnum = ring_dnum(tcpriv, rxin, RXIN);
++	ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
++	if (ret < 0)
++		goto err2;
++
+ 	/* rxout ring */
+ 	ring = &soc_rings->rxout;
+-	ring_init(tcpriv, ring, rxout, RXOUT, dnum, size);
++	dnum = ring_dnum(tcpriv, rxout, RXOUT);
++	ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
++	if (ret < 0)
++		goto err1;
++
++	for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
++
++		/* Enable RX interrupt */
++		tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
++
++		/* Enable TX interrupt */
++		tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
++	}
+ 
+ 	return 0;
++
++err1:
++	ring_free(tcpriv, &soc_rings->rxin);
++err2:
++	ring_free(tcpriv, &soc_rings->txout);
++err3:
++	ring_free(tcpriv, &soc_rings->txin);
++err4:
++	txlist_free(&soc_rings->txlist);
++err5:
++	return ret;
+ }
+ 
+ /**
+@@ -850,6 +933,26 @@ static int plat_dp_init(struct plat_priv
+  */
+ static void plat_dp_exit(struct plat_priv *priv)
+ {
++	struct tc_priv *tcpriv = priv->tc_priv;
++	struct aca_ring_grp *soc_rings = &priv->soc_rings;
++	int i;
++
++	for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
++
++		/* Disable RX interrupt */
++		tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
++
++		/* Disable TX interrupt */
++		tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
++	}
++
++	ring_free(tcpriv, &soc_rings->txin);
++	ring_free(tcpriv, &soc_rings->txout);
++	ring_free(tcpriv, &soc_rings->rxin);
++	ring_free(tcpriv, &soc_rings->rxout);
++
++	txlist_free(&soc_rings->txlist);
++
+ 	return;
+ }
+ 
+@@ -858,45 +961,45 @@ static int plat_soc_cfg_get(struct soc_c
+ 	struct plat_priv *priv = g_plat_priv;
+ 
+ 	/* TXIN */
+-	cfg->txin_dbase = priv->soc_rings.txin.dbase_phymem;
+-	cfg->txin_dnum = priv->soc_rings.txin.dnum;
+-	cfg->txin_desc_dwsz = DW_SZ(struct dma_desc);
+-	cfg->txin_cnt_phyaddr = priv->soc_rings.txin.cnt_phyaddr;
++	cfg->txin.soc_phydbase = priv->soc_rings.txin.dbase_phymem;
++	cfg->txin.soc_dnum = priv->soc_rings.txin.dnum;
++	cfg->txin.soc_desc_dwsz = DW_SZ(struct dma_tx_desc);
+ 	/* TXOUT */
+-	cfg->txout_dbase = priv->soc_rings.txout.dbase_phymem;
+-	cfg->txout_dnum = priv->soc_rings.txout.dnum;
+-	cfg->txout_desc_dwsz = DW_SZ(struct dma_desc);
+-	cfg->txout_cnt_phyaddr = priv->soc_rings.txout.cnt_phyaddr;
++	cfg->txout.soc_phydbase = priv->soc_rings.txout.dbase_phymem;
++	cfg->txout.soc_dnum = priv->soc_rings.txout.dnum;
++	cfg->txout.soc_desc_dwsz = DW_SZ(u32);
+ 	/* RXOUT */
+-	cfg->rxout_dbase = priv->soc_rings.rxout.dbase_phymem;
+-	cfg->rxout_dnum = priv->soc_rings.rxout.dnum;
+-	cfg->rxout_desc_dwsz = DW_SZ(struct dma_desc);
+-	cfg->rxout_cnt_phyaddr = priv->soc_rings.rxout.cnt_phyaddr;
++	cfg->rxout.soc_phydbase = priv->soc_rings.rxout.dbase_phymem;
++	cfg->rxout.soc_dnum = priv->soc_rings.rxout.dnum;
++	cfg->rxout.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);
+ 	/* RXIN */
+-	cfg->rxin_dbase = priv->soc_rings.rxin.dbase_phymem;
+-	cfg->rxin_dnum = priv->soc_rings.rxin.dnum;
+-	cfg->rxin_desc_dwsz = DW_SZ(struct dma_desc);
+-	cfg->rxin_cnt_phyaddr = priv->soc_rings.rxin.cnt_phyaddr;
++	cfg->rxin.soc_phydbase = priv->soc_rings.rxin.dbase_phymem;
++	cfg->rxin.soc_dnum = priv->soc_rings.rxin.dnum;
++	cfg->rxin.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);
+ 
+ 	tc_info(priv->tc_priv, MSG_INIT,
+ 		"id: %d, txin(0x%x: %d, 0x%x), txout(0x%x: %d, 0x%x), rxin(0x%x: %d, 0x%x), rxout(0x%x: %d, 0x%x)\n",
+-		id, cfg->txin_dbase, cfg->txin_dnum, cfg->txin_cnt_phyaddr,
+-		cfg->txout_dbase, cfg->txout_dnum, cfg->txout_cnt_phyaddr,
+-		cfg->rxin_dbase, cfg->rxout_dnum, cfg->rxin_cnt_phyaddr,
+-		cfg->rxout_dbase, cfg->rxout_dnum, cfg->rxout_cnt_phyaddr);
++		id, cfg->txin.soc_phydbase, cfg->txin.soc_dnum, cfg->txin.soc_cnt_phyaddr,
++		cfg->txout.soc_phydbase, cfg->txout.soc_dnum, cfg->txout.soc_cnt_phyaddr,
++		cfg->rxin.soc_phydbase, cfg->rxin.soc_dnum, cfg->rxin.soc_cnt_phyaddr,
++		cfg->rxout.soc_phydbase, cfg->rxout.soc_dnum, cfg->rxout.soc_cnt_phyaddr);
+ 
+ 	return 0;
+ }
+ 
+-static int plat_open(struct net_device *pdev, char *dev_name,
+-		int *subif, int flag)
++static int plat_open(struct net_device *pdev, const char *dev_name,
++		int id, int flag)
+ {
++	g_plat_priv->netdev = pdev;
++
+ 	return 0;
+ }
+ 
+-static void plat_close(struct net_device *pdev, char *dev_name,
+-		int subif, int flag)
++static void plat_close(struct net_device *pdev, const char *dev_name,
++		int flag)
+ {
++	g_plat_priv->netdev = NULL;
++
+ 	return;
+ }
+ 
+@@ -971,7 +1074,6 @@ static void plat_disable_us(int en)
+ static int plat_get_mib(struct net_device *pdev,
+ 			struct rtnl_link_stats64 *stat)
+ {
+-	pr_info("%s is not supported\n", __func__);
+ 	return -ENOTSUPP;
+ }
+ 
+@@ -1181,8 +1283,8 @@ int platform_init(struct tc_priv *tc_pri
+ 	INIT_WORK(&priv->req_work.work, plat_tc_req_workqueue);
+ 	tasklet_init(&txout_task, plat_txout_tasklet, 0);
+ 	tasklet_init(&rxout_task, plat_rxout_tasklet, 0);
+-	irq_init(tc_priv, drv_name);
+-	//tasklet_init(&priv->coc.coc_task, plat_coc_tasklet, 0);
++	plat_irq_init(tc_priv, drv_name);
++
+ 	plat_tc_ops_setup(tc_priv);
+ 	plat_dsl_ops_setup();
+ 
+@@ -1201,8 +1303,15 @@ void platform_dsl_exit(void)
+ 
+ void platform_exit(void)
+ {
+-	//tasklet_kill(&g_plat_priv->coc.coc_task);
++	struct tc_priv *tcpriv = plat_to_tcpriv();
++
++	tasklet_kill(&txout_task);
++	tasklet_kill(&rxout_task);
++
++	plat_irq_free(tcpriv);
++
+ 	plat_dp_exit(g_plat_priv);
++
+ 	g_plat_priv = NULL;
+ }
+ 
diff --git a/package/kernel/lantiq/vrx518_tc/patches/201-desc-length.patch b/package/kernel/lantiq/vrx518_tc/patches/201-desc-length.patch
new file mode 100644
index 0000000000..8b30914df9
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/patches/201-desc-length.patch
@@ -0,0 +1,342 @@
+Port FEATURE_CONF_DESC_LENGTH from the grx500 variant of the driver.
+This also reduces the default length of some descriptors, resulting in
+significantly lower latencies when the line is saturated.
+
+--- a/dcdp/inc/tc_common.h
++++ b/dcdp/inc/tc_common.h
+@@ -27,7 +27,11 @@
+ #define UMT_DEF_PERIOD		400	/* microseconds */
+ 
+ #define MAX_MTU			(DMA_PACKET_SZ - ETH_HLEN - HD_RSRV_SZ)
++#ifdef FEATURE_CONF_DESC_LENGTH
++#define QOSQ_NUM		8
++#else
+ #define QOSQ_NUM		2
++#endif
+ #define FW_STOP_TIMEOUT		20	/* millisecond */
+ #define QOS_DISPATCH_OWN	0
+ #define ACA_TXIN_POLL_INTVAL	10	/* millisecond */
+--- a/dcdp/inc/tc_main.h
++++ b/dcdp/inc/tc_main.h
+@@ -30,6 +30,7 @@
+ #define TCPRIV_ALIGN	32
+ #define DMA_PACKET_SZ	2048
+ 
++#define FEATURE_CONF_DESC_LENGTH 1
+ #define FEATURE_POWER_DOWN 1
+ 
+ enum {
+@@ -157,6 +158,25 @@ struct tc_param {
+ 	unsigned int txout_dnum;
+ 	unsigned int rxin_dnum;
+ 	unsigned int rxout_dnum;
++
++#ifdef FEATURE_CONF_DESC_LENGTH
++	/* __US_FAST_PATH_DES_LIST_NUM:64
++	 * __ACA_TX_IN_PD_LIST_NUM
++	 * __ACA_TX_OUT_PD_LIST_NUM
++	 * */
++	u32 conf_us_fp_desq_len;
++	/*
++	 * Number of descriptors per QoS queue: QOS_DES_NUM / QOSQ_NUM
++	 * */
++	u32 conf_us_qos_queue_len;
++	/* __US_OUTQ0_DES_LIST_NUM: 32
++	 * __US_OUTQ1_DES_LIST_NUM: 32
++	 * OUTQ_DESC_PER_Q
++	 * */
++	u32 conf_us_outq_len;
++	/* __US_TC_LOCAL_Q0_DES_LIST_NUM */
++	u32 conf_us_local_q0_desq_len;
++#endif
+ };
+ 
+ struct cdma {
+--- a/dcdp/ptm_tc.c
++++ b/dcdp/ptm_tc.c
+@@ -75,7 +75,11 @@ static const u32 tx_kvec[] = {
+ 	0x30B1B233, 0xB43536B7, 0xB8393ABB, 0x3CBDBE3F,
+ 	0xC04142C3, 0x44C5C647, 0x48C9CA4B, 0xCC4D4ECF
+ };
++#ifndef FEATURE_CONF_DESC_LENGTH
+ static const u32 def_outq_map[OUTQ_PNUM] = {0x1, 0xFE};
++#else
++static const u32 def_outq_map[OUTQ_PNUM] = {0x0, 0xFF};
++#endif
+ static const char ptm_drv_name[] = "PTM SL";
+ static const char ptm_bond_name[][IFNAMSIZ] = {"PTM US BOND", "PTM DS BOND"};
+ 
+@@ -1005,6 +1009,10 @@ static void us_fp_desq_cfg_ctxt_init(str
+ 	int i;
+ 	u32 desc_addr;
+ 	rx_descriptor_t desc;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	struct tc_priv *tc_priv;
++	tc_priv = priv->tc_priv;
++#endif
+ 
+ 	memset(&desq_cfg, 0, sizeof(desq_cfg));
+ 	/* Initialize US Fast-Path Descriptor Queue Config/Context */
+@@ -1012,7 +1020,11 @@ static void us_fp_desq_cfg_ctxt_init(str
+ 	desq_cfg.fast_path	= 1;
+ 	desq_cfg.mbox_int_en	= 0;
+ 	desq_cfg.des_sync_needed = 0;
++#ifndef FEATURE_CONF_DESC_LENGTH
+ 	desq_cfg.des_num	= __US_FAST_PATH_DES_LIST_NUM;
++#else
++	desq_cfg.des_num	= tc_priv->param.conf_us_fp_desq_len;
++#endif
+ 	desq_cfg.des_base_addr	= __US_FAST_PATH_DES_LIST_BASE;
+ 
+ 	tc_mem_write(priv, fpi_addr(__US_FP_INQ_DES_CFG_CTXT),
+@@ -1036,12 +1048,20 @@ static void us_qos_desq_cfg_ctxt_init(st
+ 	int offset, i;
+ 	rx_descriptor_t desc;
+ 	u32 phy_addr;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	struct tc_priv *tc_priv;
++	tc_priv = priv->tc_priv;
++#endif
+ 
+ 	/* Setup QoSQ_CFG_CTXT */
+ 	memset(&qosq_cfg_ctxt, 0, sizeof(qosq_cfg_ctxt));
+ 
+ 	qosq_cfg_ctxt.threshold = 8;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	qosq_cfg_ctxt.des_num	= tc_priv->param.conf_us_qos_queue_len;
++#else
+ 	qosq_cfg_ctxt.des_num	= QOS_DES_NUM / QOSQ_NUM;
++#endif
+ 
+ 	offset = 0;
+ 	for (i = 0; i < QOSQ_NUM; i++) {
+@@ -1080,6 +1100,10 @@ static void us_outq_desq_cfg_ctxt_init(s
+ 	u32 phy_addr;
+ 	int i;
+ 	u32 offset;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	struct tc_priv *tc_priv;
++	tc_priv = priv->tc_priv;
++#endif
+ 
+ 	/* Setup OUTQ_QoS_CFG_CTXT */
+ 	/* NOTE: By default, Shaping & WFQ both are DISABLED!! */
+@@ -1108,7 +1132,11 @@ static void us_outq_desq_cfg_ctxt_init(s
+ 	desq_cfg.des_in_own_val	= US_OUTQ_DES_OWN;
+ 	desq_cfg.mbox_int_en	= 0;
+ 	desq_cfg.des_sync_needed = 0;
+-	desq_cfg.des_num	= 32;
++#ifndef FEATURE_CONF_DESC_LENGTH
++	desq_cfg.des_num	= OUTQ_DESC_PER_Q;
++#else
++	desq_cfg.des_num	= tc_priv->param.conf_us_outq_len;
++#endif
+ 	/**
+ 	* Only BC0 is used in VRX518
+ 	*/
+@@ -1174,7 +1202,11 @@ static void us_qos_cfg_init(struct ptm_e
+ 	/* Set QoS NO DROP */
+ 	sb_w32(1, __QOSQ_NO_DROP);
+ 	/* Enable Preemption function/Disable QoS by default */
++#ifdef FEATURE_CONF_DESC_LENGTH
++	sb_w32(0, _CHK_PREEMP_MAP);
++#else
+ 	sb_w32(1, _CHK_PREEMP_MAP);
++#endif
+ 	/* By default, all qid mappint to non-preemption queue */
+ 	sb_w32(0x0, _QID2PREEMP_MAP);
+ 
+@@ -1376,6 +1408,11 @@ static void ptm_local_desq_cfg_ctxt_init
+ 	u32 dcnt, addr, pdbram_base;
+ 	unsigned int us_des_alloc[] = {
+ 		__US_TC_LOCAL_Q0_DES_LIST_NUM, __US_TC_LOCAL_Q1_DES_LIST_NUM};
++#ifdef FEATURE_CONF_DESC_LENGTH
++	struct tc_priv *tc_priv;
++	tc_priv = priv->tc_priv;
++	us_des_alloc[0] = tc_priv->param.conf_us_local_q0_desq_len;
++#endif
+ 
+ 	/* Setup the Local DESQ Configuration/Context for UpStream Queues */
+ 	memset(&desq_cfg, 0, sizeof(desq_cfg));
+@@ -2321,6 +2358,10 @@ static void ptm_aca_init(struct ptm_ep_p
+ 	u32 phybase = priv->ep->phy_membase;
+ 	u32 start;
+ 	u32 type;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	struct tc_priv *tc_priv;
++	tc_priv = priv->tc_priv;
++#endif
+ 
+ 	priv->tc_priv->tc_ops.soc_cfg_get(&priv->tc_priv->cfg, ptm_id(priv));
+ 	memset(&param, 0, sizeof(param));
+@@ -2334,7 +2375,11 @@ static void ptm_aca_init(struct ptm_ep_p
+ #endif
+ 	txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
+ 	txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
++#ifndef FEATURE_CONF_DESC_LENGTH
+ 	txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
++#else
++	txin->pd_desc_num = tc_priv->param.conf_us_fp_desq_len;
++#endif
+ 	txin->pd_size_in_dw = DESC_DWSZ;
+ 	txin->soc_desc_base = cfg->txin.soc_phydbase;
+ 	txin->soc_desc_num = cfg->txin.soc_dnum;
+--- a/dcdp/tc_main.c
++++ b/dcdp/tc_main.c
+@@ -182,6 +182,12 @@ static inline void init_local_param(stru
+ 	priv->param.txout_dnum = txout_num;
+ 	priv->param.rxin_dnum = rxin_num;
+ 	priv->param.rxout_dnum = rxout_num;
++#ifdef FEATURE_CONF_DESC_LENGTH
++	priv->param.conf_us_fp_desq_len = 32;
++	priv->param.conf_us_qos_queue_len = 32;
++	priv->param.conf_us_outq_len = 32;
++	priv->param.conf_us_local_q0_desq_len = 16;
++#endif
+ 	priv->tc_mode = TC_NONE_MODE;
+ 	priv->tc_stat = NO_TC;
+ 
+--- a/dcdp/tc_proc.c
++++ b/dcdp/tc_proc.c
+@@ -1114,6 +1114,9 @@ static int proc_read_ver(struct seq_file
+ 		(date >> 16) & 0xff,
+ 		(date & 0xffff));
+ 
++#ifdef FEATURE_CONF_DESC_LENGTH
++	seq_puts(seq, " + Support QoS and Configurable descriptor length\n");
++#endif
+ #ifdef FEATURE_POWER_DOWN
+ 	seq_puts(seq, " + Support Power Down enhancement feature\n");
+ #endif
+@@ -1166,6 +1169,113 @@ static const struct proc_ops tc_soc_proc
+ 	.proc_release	= single_release,
+ };
+ 
++#ifdef FEATURE_CONF_DESC_LENGTH
++static ssize_t proc_write_desc_conf(struct file *file, const char __user *buf,
++			size_t count, loff_t *data)
++{
++	struct tc_priv *priv;
++	char str[32];
++	int len, rlen, temp;
++	int num, temp_num;
++	char *param_list[20];
++	len = count < sizeof(str) ? count : sizeof(str) - 1;
++	rlen = len - copy_from_user(str, buf, len);
++	str[rlen] = 0;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	priv = (struct tc_priv *)PDE_DATA(file_inode(file));
++	if (priv == NULL)
++		return count;
++
++	num = vrx_split_buffer(str, param_list, ARRAY_SIZE(param_list));
++	if (num < 1 || num > 4)
++		goto proc_dbg_desc_conf;
++
++	temp_num = num;
++	if (num-- != 0) {
++		temp = vrx_atoi(param_list[0]);
++		if (temp < 1 || temp > 128) {
++			pr_info("Fastpath valid range: 1 -> 128\n");
++			goto proc_dbg_desc_conf;
++		}
++	}
++	if (num-- != 0) {
++		temp = vrx_atoi(param_list[1]);
++		if (temp < 1 || temp > 63) {
++			pr_info("QoS valid range: 1 -> 63\n");
++			goto proc_dbg_desc_conf;
++		}
++	}
++	if (num-- != 0) {
++		temp = vrx_atoi(param_list[2]);
++		if (temp < 1 || temp > 128) {
++			pr_info("OutQ valid range: 1 -> 128\n");
++			goto proc_dbg_desc_conf;
++		}
++	}
++	if (num-- != 0) {
++		temp = vrx_atoi(param_list[3]);
++		if (temp < 4 || temp > 16) {
++			pr_info("Local Q0 valid range: 4 -> 16\n");
++			goto proc_dbg_desc_conf;
++		}
++	}
++	num = temp_num;
++	if (num-- != 0) {
++		priv->param.conf_us_fp_desq_len = vrx_atoi(param_list[0]);
++	}
++	if (num-- != 0) {
++		priv->param.conf_us_qos_queue_len = vrx_atoi(param_list[1]);
++	}
++	if (num-- != 0) {
++		priv->param.conf_us_outq_len = vrx_atoi(param_list[2]);
++	}
++	if (num-- != 0) {
++		priv->param.conf_us_local_q0_desq_len = vrx_atoi(param_list[3]);
++	}
++
++	return count;
++
++proc_dbg_desc_conf:
++	pr_info("echo [FP] [QoS] [OutQ] [LocalQ0]> desc_conf\n");
++	return count;
++}
++
++static int proc_read_desc_conf(struct seq_file *seq, void *v)
++{
++	struct tc_priv *priv;
++	priv = (struct tc_priv *)seq->private;
++	if (priv == NULL)
++		return -1;
++	seq_puts(seq, "Upstream descriptor length information:\n");
++	seq_printf(seq, " - Fastpath: %d\n",
++		priv->param.conf_us_fp_desq_len);
++	seq_printf(seq, " - QoS: %d\n",
++		priv->param.conf_us_qos_queue_len);
++	seq_printf(seq, " - OutQ: %d\n",
++		priv->param.conf_us_outq_len);
++	seq_printf(seq, " - Local Q0: %d\n",
++		priv->param.conf_us_local_q0_desq_len);
++	seq_puts(seq, "\n");
++	return 0;
++}
++
++static int proc_read_desc_conf_seq_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, proc_read_desc_conf, PDE_DATA(inode));
++}
++#endif
++
++static const struct proc_ops tc_desc_conf_proc_fops = {
++	.proc_open	= proc_read_desc_conf_seq_open,
++	.proc_read	= seq_read,
++	.proc_write	= proc_write_desc_conf,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++};
++
+ static struct tc_proc_list tc_procs[] = {
+ 	{TC_PROC_DIR,	0,	NULL,			1},
+ 	{"cfg",		0644, &tc_cfg_proc_fops,	0},
+@@ -1174,6 +1284,9 @@ static struct tc_proc_list tc_procs[] =
+ 	{"showtime",	0200, &tc_show_time_proc_fops,	0},
+ 	{"ver",		0644, &tc_ver_proc_fops,	0},
+ 	{"soc",		0644, &tc_soc_proc_fops,	0},
++#ifdef FEATURE_CONF_DESC_LENGTH
++	{"desc_conf",	0644, &tc_desc_conf_proc_fops,	0},
++#endif
+ };
+ 
+ int tc_proc_init(struct tc_priv *priv)
+@@ -1333,7 +1446,6 @@ proc_ptm_cfg_help:
+ 	return count;
+ }
+ 
+-
+ static const struct proc_ops ptm_cfg_proc_fops = {
+ 	.proc_open = proc_read_cfg_seq_open,
+ 	.proc_read = seq_read,
diff --git a/package/kernel/lantiq/vrx518_tc/patches/202-napi.patch b/package/kernel/lantiq/vrx518_tc/patches/202-napi.patch
new file mode 100644
index 0000000000..55f0cc1066
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/patches/202-napi.patch
@@ -0,0 +1,423 @@
+--- a/dcdp/platform/sw_plat.c
++++ b/dcdp/platform/sw_plat.c
+@@ -208,6 +208,8 @@ struct plat_priv {
+ 	struct tc_req req_work;
+ 	struct aca_ring_grp soc_rings;
+ 	struct net_device *netdev;
++	struct napi_struct *napi_tx;
++	struct napi_struct *napi_rx;
+ 	DECLARE_HASHTABLE(mem_map, 8);
+ };
+ 
+@@ -472,7 +474,7 @@ err2:
+ 	return -1;
+ }
+ 
+-static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
++static int txout_action(struct tc_priv *priv, struct aca_ring *txout, int budget)
+ {
+ 	struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
+ 	struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
+@@ -490,7 +492,10 @@ static void txout_action(struct tc_priv
+ 		spin_lock_irqsave(&tx_spinlock, flags);
+ 	}
+ 
+-	for (i = 0; i < txout->dnum; i++) {
++	if (budget == 0 || budget > txout->dnum)
++		budget = txout->dnum;
++
++	for (i = 0; i < budget; i++) {
+ 		desc = txout->dbase_mem;
+ 		desc += txout->idx;
+ 
+@@ -540,6 +545,8 @@ static void txout_action(struct tc_priv
+ 	if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
+ 		netif_wake_queue(g_plat_priv->netdev);
+ 	}
++
++	return cnt;
+ }
+ 
+ static void rxin_action(struct tc_priv *priv,
+@@ -549,7 +556,7 @@ static void rxin_action(struct tc_priv *
+ 	writel(cnt, rxin->umt_dst);
+ }
+ 
+-static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
++static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout, int budget)
+ {
+ 	struct device *pdev = priv->ep_dev[0].dev;
+ 	int i, cnt;
+@@ -559,8 +566,11 @@ static int rxout_action(struct tc_priv *
+ 	size_t len;
+ 	struct sk_buff *skb;
+ 
++	if (budget == 0 || budget > rxout->dnum)
++		budget = rxout->dnum;
++
+ 	cnt = 0;
+-	for (i = 0; i < rxout->dnum; i++) {
++	for (i = 0; i < budget; i++) {
+ 		desc = rxout->dbase_mem;
+ 		desc += rxout->idx;
+ 
+@@ -593,14 +603,30 @@ static int rxout_action(struct tc_priv *
+ 		ring_idx_inc(rxout);
+ 	}
+ 
+-	if (!cnt)
+-		tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
+-	else
++	if (cnt)
+ 		writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD
+ 
+ 	return cnt;
+ }
+ 
++static int plat_txout_napi(struct napi_struct *napi, int budget)
++{
++	struct plat_priv *priv = g_plat_priv;
++	struct tc_priv *tcpriv = plat_to_tcpriv();
++	struct aca_ring *txout = &priv->soc_rings.txout;
++	struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
++	int cnt;
++
++	cnt = txout_action(tcpriv, txout, budget);
++
++	if (cnt < budget) {
++		if (napi_complete_done(napi, cnt))
++			ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
++	}
++
++	return cnt;
++}
++
+ static void plat_txout_tasklet(unsigned long arg)
+ {
+ 	struct plat_priv *priv = g_plat_priv;
+@@ -608,12 +634,33 @@ static void plat_txout_tasklet(unsigned
+ 	struct aca_ring *txout = &priv->soc_rings.txout;
+ 	struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
+ 
+-	txout_action(tcpriv, txout);
++	txout_action(tcpriv, txout, 0);
+ 
+ 	/* Enable interrupt */
+ 	ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
+ }
+ 
++static int plat_rxout_napi(struct napi_struct *napi, int budget)
++{
++	struct plat_priv *priv = g_plat_priv;
++	struct tc_priv *tcpriv = plat_to_tcpriv();
++	struct aca_ring *rxout = &priv->soc_rings.rxout;
++	struct aca_ring *rxin = &priv->soc_rings.rxin;
++	struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
++	int cnt;
++
++	cnt = rxout_action(tcpriv, rxout, budget);
++	if (cnt)
++		rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
++
++	if (cnt < budget) {
++		if (napi_complete_done(napi, cnt))
++			ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_RX);
++	}
++
++	return cnt;
++}
++
+ static void plat_rxout_tasklet(unsigned long arg)
+ {
+ 	struct plat_priv *priv = g_plat_priv;
+@@ -623,7 +670,7 @@ static void plat_rxout_tasklet(unsigned
+ 	struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
+ 	int cnt;
+ 
+-	cnt = rxout_action(tcpriv, rxout);
++	cnt = rxout_action(tcpriv, rxout, 0);
+ 	if (cnt)
+ 		rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
+ 
+@@ -783,11 +830,22 @@ static irqreturn_t aca_rx_irq_handler(in
+ {
+ 	struct dc_ep_dev *ep_dev = dev_id;
+ 
+-	/* Disable IRQ in IMCU */
+-	ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
++	if (g_plat_priv->napi_rx) {
++
++		if (napi_schedule_prep(g_plat_priv->napi_rx)) {
++			ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
++			__napi_schedule(g_plat_priv->napi_rx);
++		}
++
++	} else {
++
++		/* Disable IRQ in IMCU */
++		ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
+ 
+-	/* Start tasklet */
+-	tasklet_schedule(&rxout_task);
++		/* Start tasklet */
++		tasklet_schedule(&rxout_task);
++
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -796,15 +854,62 @@ static irqreturn_t aca_tx_irq_handler(in
+ {
+ 	struct dc_ep_dev *ep_dev = dev_id;
+ 
+-	/* Disable IRQ in IMCU */
+-	ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
++	if (g_plat_priv->napi_tx) {
+ 
+-	/* Start tasklet */
+-	tasklet_schedule(&txout_task);
++		if (napi_schedule_prep(g_plat_priv->napi_tx)) {
++			ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
++			__napi_schedule(g_plat_priv->napi_tx);
++		}
++
++	} else {
++
++		/* Disable IRQ in IMCU */
++		ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
++
++		/* Start tasklet */
++		tasklet_schedule(&txout_task);
++
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
++static void plat_net_open(void)
++{
++	struct plat_priv *priv = g_plat_priv;
++	struct tc_priv *tcpriv = plat_to_tcpriv();
++	struct aca_ring *rxout = &priv->soc_rings.rxout;
++	struct aca_ring *txout = &priv->soc_rings.txout;
++	struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
++	struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
++
++	if (priv->napi_rx)
++		napi_enable(priv->napi_rx);
++	ep_dev_rx->hw_ops->icu_en(ep_dev_rx, ACA_HOSTIF_RX);
++
++	if (priv->napi_tx)
++		napi_enable(priv->napi_tx);
++	ep_dev_tx->hw_ops->icu_en(ep_dev_tx, ACA_HOSTIF_TX);
++}
++
++static void plat_net_stop(void)
++{
++	struct plat_priv *priv = g_plat_priv;
++	struct tc_priv *tcpriv = plat_to_tcpriv();
++	struct aca_ring *rxout = &priv->soc_rings.rxout;
++	struct aca_ring *txout = &priv->soc_rings.txout;
++	struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
++	struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
++
++	if (priv->napi_tx)
++		napi_disable(priv->napi_tx);
++	ep_dev_tx->hw_ops->icu_mask(ep_dev_tx, ACA_HOSTIF_TX);
++
++	if (priv->napi_rx)
++		napi_disable(priv->napi_rx);
++	ep_dev_rx->hw_ops->icu_mask(ep_dev_rx, ACA_HOSTIF_RX);
++}
++
+ static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
+ {
+ 	int ret;
+@@ -988,17 +1093,49 @@ static int plat_soc_cfg_get(struct soc_c
+ }
+ 
+ static int plat_open(struct net_device *pdev, const char *dev_name,
++		struct napi_struct *napi_tx, struct napi_struct *napi_rx,
+ 		int id, int flag)
+ {
++	struct tc_priv *priv = g_plat_priv->tc_priv;
++	int i;
++
++	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
++		disable_irq(priv->ep_dev[i].aca_rx_irq);
++		disable_irq(priv->ep_dev[i].aca_tx_irq);
++	}
++
+ 	g_plat_priv->netdev = pdev;
++	g_plat_priv->napi_tx = napi_tx;
++	g_plat_priv->napi_rx = napi_rx;
++
++	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
++		enable_irq(priv->ep_dev[i].aca_rx_irq);
++		enable_irq(priv->ep_dev[i].aca_tx_irq);
++	}
+ 
+ 	return 0;
+ }
+ 
+ static void plat_close(struct net_device *pdev, const char *dev_name,
++		struct napi_struct *napi_tx, struct napi_struct *napi_rx,
+ 		int flag)
+ {
++	struct tc_priv *priv = g_plat_priv->tc_priv;
++	int i;
++
++	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
++		disable_irq(priv->ep_dev[i].aca_rx_irq);
++		disable_irq(priv->ep_dev[i].aca_tx_irq);
++	}
++
+ 	g_plat_priv->netdev = NULL;
++	g_plat_priv->napi_tx = NULL;
++	g_plat_priv->napi_rx = NULL;
++
++	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
++		enable_irq(priv->ep_dev[i].aca_rx_irq);
++		enable_irq(priv->ep_dev[i].aca_tx_irq);
++	}
+ 
+ 	return;
+ }
+@@ -1084,6 +1221,10 @@ static void plat_tc_ops_setup(struct tc_
+ 	priv->tc_ops.free = plat_mem_free;
+ 	priv->tc_ops.dev_reg = plat_open;
+ 	priv->tc_ops.dev_unreg = plat_close;
++	priv->tc_ops.net_open = plat_net_open;
++	priv->tc_ops.net_stop = plat_net_stop;
++	priv->tc_ops.napi_tx = plat_txout_napi;
++	priv->tc_ops.napi_rx = plat_rxout_napi;
+ 	priv->tc_ops.umt_init = plat_umt_init;
+ 	priv->tc_ops.umt_exit = plat_umt_exit;
+ 	priv->tc_ops.umt_start = plat_umt_start;
+--- a/dcdp/atm_tc.c
++++ b/dcdp/atm_tc.c
+@@ -3650,7 +3650,7 @@ static void atm_aca_ring_config_init(str
+ static int atm_ring_init(struct atm_priv *priv)
+ {
+ 	atm_aca_ring_config_init(priv);
+-	return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, 0, 0);
++	return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, NULL, NULL, 0, 0);
+ }
+ 
+ static int atm_init(struct tc_priv *tcpriv, u32 ep_id)
+@@ -4020,7 +4020,7 @@ void atm_tc_unload(void)
+ 	/* unregister device */
+ 	if (priv->tc_priv->tc_ops.dev_unreg != NULL)
+ 		priv->tc_priv->tc_ops.dev_unreg(NULL,
+-			g_atm_dev_name, 0);
++			g_atm_dev_name, NULL, NULL, 0);
+ 
+ 	/* atm_dev_deinit(priv); */
+ 	/* modem module power off */
+--- a/dcdp/inc/tc_main.h
++++ b/dcdp/inc/tc_main.h
+@@ -209,9 +209,15 @@ struct tc_hw_ops {
+ 	void (*subif_unreg)(struct net_device *pdev, const char *dev_name,
+ 			int subif_id, int flag);
+ 	int (*dev_reg)(struct net_device *pdev, const char *dev_name,
++			struct napi_struct *napi_tx, struct napi_struct *napi_rx,
+ 			int id, int flag);
+ 	void (*dev_unreg)(struct net_device *pdev, const char *dev_name,
++			struct napi_struct *napi_tx, struct napi_struct *napi_rx,
+ 			int flag);
++	void (*net_open)(void);
++	void (*net_stop)(void);
++	int (*napi_tx)(struct napi_struct *napi, int budget);
++	int (*napi_rx)(struct napi_struct *napi, int budget);
+ 
+ 	/*umt init/exit including the corresponding DMA init/exit */
+ 	int (*umt_init)(u32 umt_id, u32 umt_period, u32 umt_dst);
+--- a/dcdp/ptm_tc.c
++++ b/dcdp/ptm_tc.c
+@@ -141,7 +141,11 @@ static int ptm_open(struct net_device *d
+ 	struct ptm_priv *ptm_tc = netdev_priv(dev);
+ 
+ 	tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm open\n");
++
++	ptm_tc->tc_priv->tc_ops.net_open();
++
+ 	netif_tx_start_all_queues(dev);
++
+ #ifdef CONFIG_SOC_TYPE_XWAY
+ 	xet_phy_wan_port(7, NULL, 1, 1);
+ 	if (ppa_hook_ppa_phys_port_add_fn)
+@@ -158,7 +162,11 @@ static int ptm_stop(struct net_device *d
+ 	struct ptm_priv *ptm_tc = netdev_priv(dev);
+ 
+ 	tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm stop\n");
++
+ 	netif_tx_stop_all_queues(dev);
++
++	ptm_tc->tc_priv->tc_ops.net_stop();
++
+ #ifdef CONFIG_SOC_TYPE_XWAY
+     if (ppa_drv_datapath_mac_entry_setting)
+         ppa_drv_datapath_mac_entry_setting(dev->dev_addr, 0, 6, 10, 1, 2);
+@@ -555,7 +563,7 @@ static void ptm_rx(struct net_device *de
+ 	ptm_tc->stats64.rx_packets++;
+ 	ptm_tc->stats64.rx_bytes += skb->len;
+ 
+-	if (netif_rx(skb) == NET_RX_DROP)
++	if (netif_receive_skb(skb) == NET_RX_DROP)
+ 		ptm_tc->stats64.rx_dropped++;
+ 
+ 	return;
+@@ -651,6 +659,9 @@ static int ptm_dev_init(struct tc_priv *
+ 	memcpy(ptm_tc->outq_map, def_outq_map, sizeof(def_outq_map));
+ 	SET_NETDEV_DEV(ptm_tc->dev, tc_priv->ep_dev[id].dev);
+ 
++	netif_napi_add(ptm_tc->dev, &ptm_tc->napi_rx, tc_priv->tc_ops.napi_rx, NAPI_POLL_WEIGHT);
++	netif_tx_napi_add(ptm_tc->dev, &ptm_tc->napi_tx, tc_priv->tc_ops.napi_tx, NAPI_POLL_WEIGHT);
++
+ 	err = register_netdev(ptm_tc->dev);
+ 	if (err)
+ 		goto err1;
+@@ -2605,7 +2616,9 @@ static int ptm_ring_init(struct ptm_ep_p
+ {
+ 	ptm_aca_ring_config_init(priv, id, bonding);
+ 	return priv->tc_priv->tc_ops.dev_reg(priv->ptm_tc->dev,
+-		priv->ptm_tc->dev->name, id, bonding);
++		priv->ptm_tc->dev->name,
++		&priv->ptm_tc->napi_tx, &priv->ptm_tc->napi_rx,
++		id, bonding);
+ }
+ 
+ /**
+@@ -2960,7 +2973,9 @@ void ptm_tc_unload(enum dsl_tc_mode tc_m
+ 	/* unregister device */
+ 	if (ptm_tc->tc_priv->tc_ops.dev_unreg != NULL)
+ 		ptm_tc->tc_priv->tc_ops.dev_unreg(ptm_tc->dev,
+-			ptm_tc->dev->name, 0);
++			ptm_tc->dev->name,
++			&priv->ptm_tc->napi_tx, &priv->ptm_tc->napi_rx,
++			0);
+ 
+ 	/* remove PTM callback function */
+ 	ptm_cb_setup(ptm_tc, 0);
+@@ -2978,6 +2993,10 @@ void ptm_exit(void)
+ 
+ 	if (!priv)
+ 		return;
++
++	netif_napi_del(&priv->napi_tx);
++	netif_napi_del(&priv->napi_rx);
++
+ 	unregister_netdev(priv->dev);
+ 	free_netdev(priv->dev);
+ 
+--- a/dcdp/inc/ptm_tc.h
++++ b/dcdp/inc/ptm_tc.h
+@@ -119,6 +119,8 @@ struct ptm_priv {
+ 	u32 ep_id;
+ 	struct ppe_fw fw;
+ 	struct net_device *dev;
++	struct napi_struct napi_tx;
++	struct napi_struct napi_rx;
+ 	spinlock_t ptm_lock;
+ 	struct rtnl_link_stats64 stats64;
+ 	int subif_id;
diff --git a/package/kernel/lantiq/vrx518_tc/patches/203-dbg.patch b/package/kernel/lantiq/vrx518_tc/patches/203-dbg.patch
new file mode 100644
index 0000000000..687e66f991
--- /dev/null
+++ b/package/kernel/lantiq/vrx518_tc/patches/203-dbg.patch
@@ -0,0 +1,120 @@
+--- a/dcdp/platform/sw_plat.c
++++ b/dcdp/platform/sw_plat.c
+@@ -85,6 +85,7 @@ struct aca_ring {
+ 	u32 dnum;
+ 	u32 dsize;
+ 	int idx; /* SoC RX/TX index */
++	u64 cnt;
+ 	int ep_dev_idx;
+ };
+ 
+@@ -210,6 +211,8 @@ struct plat_priv {
+ 	struct net_device *netdev;
+ 	struct napi_struct *napi_tx;
+ 	struct napi_struct *napi_rx;
++	u64 napi_tx_stats[NAPI_POLL_WEIGHT+1];
++	u64 napi_rx_stats[NAPI_POLL_WEIGHT+1];
+ 	DECLARE_HASHTABLE(mem_map, 8);
+ };
+ 
+@@ -362,6 +365,7 @@ static void txlist_free(struct tx_list *
+ static inline void ring_idx_inc(struct aca_ring *ring)
+ {
+ 	ring->idx = (ring->idx + 1) % ring->dnum;
++	ring->cnt += 1;
+ }
+ 
+ static struct sk_buff *txin_skb_prepare(struct sk_buff *skb)
+@@ -619,6 +623,8 @@ static int plat_txout_napi(struct napi_s
+ 
+ 	cnt = txout_action(tcpriv, txout, budget);
+ 
++	priv->napi_tx_stats[cnt] += 1;
++
+ 	if (cnt < budget) {
+ 		if (napi_complete_done(napi, cnt))
+ 			ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
+@@ -653,6 +659,8 @@ static int plat_rxout_napi(struct napi_s
+ 	if (cnt)
+ 		rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
+ 
++	priv->napi_rx_stats[cnt] += 1;
++
+ 	if (cnt < budget) {
+ 		if (napi_complete_done(napi, cnt))
+ 			ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_RX);
+@@ -1092,6 +1100,56 @@ static int plat_soc_cfg_get(struct soc_c
+ 	return 0;
+ }
+ 
++static struct proc_dir_entry *g_proc_entry;
++
++static int proc_show(struct seq_file *m, void *p)
++{
++	struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
++	struct aca_ring *txout = &g_plat_priv->soc_rings.txout;
++	struct aca_ring *rxin = &g_plat_priv->soc_rings.rxin;
++	struct aca_ring *rxout = &g_plat_priv->soc_rings.rxout;
++	int i;
++
++	seq_printf(m, "napi_tx_stats: ");
++	for (i = 0; i < sizeof(g_plat_priv->napi_tx_stats) / sizeof(g_plat_priv->napi_tx_stats[0]); i++) {
++		if (i == 0) {
++			seq_printf(m, "%llu", g_plat_priv->napi_tx_stats[i]);
++		} else {
++			seq_printf(m, ", %llu", g_plat_priv->napi_tx_stats[i]);
++		}
++	}
++	seq_printf(m, "\n");
++
++	seq_printf(m, "napi_rx_stats: ");
++	for (i = 0; i < sizeof(g_plat_priv->napi_rx_stats) / sizeof(g_plat_priv->napi_rx_stats[0]); i++) {
++		if (i == 0) {
++			seq_printf(m, "%llu", g_plat_priv->napi_rx_stats[i]);
++		} else {
++			seq_printf(m, ", %llu", g_plat_priv->napi_rx_stats[i]);
++		}
++	}
++	seq_printf(m, "\n");
++
++	seq_printf(m, "txin: %d/%u, %llu\n", txin->idx, txin->dnum, txin->cnt);
++	seq_printf(m, "txout: %d/%u, %llu\n", txout->idx, txout->dnum, txout->cnt);
++	seq_printf(m, "rxin: %d/%u, %llu\n", rxin->idx, rxin->dnum, rxin->cnt);
++	seq_printf(m, "rxout: %d/%u, %llu\n", rxout->idx, rxout->dnum, rxout->cnt);
++
++	return 0;
++}
++
++static int proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, proc_show, NULL);
++}
++
++static struct proc_ops proc_operations = {
++        .proc_open    = proc_open,
++        .proc_read    = seq_read,
++        .proc_lseek   = seq_lseek,
++        .proc_release = single_release
++};
++
+ static int plat_open(struct net_device *pdev, const char *dev_name,
+ 		struct napi_struct *napi_tx, struct napi_struct *napi_rx,
+ 		int id, int flag)
+@@ -1099,6 +1157,8 @@ static int plat_open(struct net_device *
+ 	struct tc_priv *priv = g_plat_priv->tc_priv;
+ 	int i;
+ 
++	g_proc_entry = proc_create("swplat", 0600, priv->proc_dir, &proc_operations);
++
+ 	for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
+ 		disable_irq(priv->ep_dev[i].aca_rx_irq);
+ 		disable_irq(priv->ep_dev[i].aca_tx_irq);
+@@ -1137,6 +1197,8 @@ static void plat_close(struct net_device
+ 		enable_irq(priv->ep_dev[i].aca_tx_irq);
+ 	}
+ 
++	proc_remove(g_proc_entry);
++
+ 	return;
+ }
+ 




More information about the lede-commits mailing list