[PATCH] ARM: Support for IXP4xx built-in Ethernet interfaces.

Krzysztof Halasa khc at pm.waw.pl
Sun Apr 7 15:58:41 EDT 2013


Signed-off-by: Krzysztof Hałasa <khc at pm.waw.pl>
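---
For context (not part of the patch): a board file is expected to pass a
struct eth_plat_info as platform_data of an "ixp4xx_eth" device, roughly as
sketched below. The registration helper, the .npe encoding (a plain NPE
index is assumed here, since probe() hands the value straight to
npe_request()) and the chosen register base are my assumptions, not taken
from this patch:

/* hypothetical board code for one NPE-B port */
static struct eth_plat_info eth0_pdata = {
	.regs = (void *)IXP4XX_EthB_BASE,	/* MAC register block */
	.npe  = 1,	/* NPE-B (assumed: NPE index, not IXP4XX_ETH_NPEB) */
	.phy  = 0,	/* MII PHY address */
	/* rxq and txreadyq are kept for Linux compatibility; this driver
	   derives its queue numbers from the NPE id instead */
};

static struct device_d eth0_dev = {
	.id            = -1,
	.name          = "ixp4xx_eth",
	.platform_data = &eth0_pdata,
};

static int board_eth_register(void)
{
	return platform_device_register(&eth0_dev);
}
device_initcall(board_eth_register);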

diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
new file mode 100644
index 0000000..1df4aa4
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -0,0 +1,15 @@
+#include <asm/types.h>
+
+#define IXP4XX_ETH_NPEA 0x00
+#define IXP4XX_ETH_NPEB 0x10
+#define IXP4XX_ETH_NPEC 0x20
+
+/* Information about built-in Ethernet MAC interfaces */
+struct eth_plat_info {
+	void *regs;
+	u8 npe;
+	u8 phy; /* MII PHY ID, 0 - 31 */
+	u8 rxq; /* configurable, currently 0 - 31 only */
+	u8 txreadyq;
+	u8 hwaddr[6];
+};
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2736094..d6164ea 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -93,6 +93,14 @@ config DRIVER_NET_MACB
 	depends on HAS_MACB
 	select PHYLIB
 
+config DRIVER_NET_IXP4XX_ETH
+	tristate "Intel IXP4xx Ethernet support"
+	depends on ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+	select PHYLIB
+	help
+	  Say Y here if you want to use the built-in Ethernet ports
+	  on the IXP4xx processor.
+
 config DRIVER_NET_TAP
 	bool "tap Ethernet driver"
 	depends on LINUX
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 42136f8..4a2ced9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_DRIVER_NET_MACB)		+= macb.o
 obj-$(CONFIG_DRIVER_NET_TAP)		+= tap.o
 obj-$(CONFIG_PHYLIB)			+= phy/
 obj-$(CONFIG_NET_USB)			+= usb/
+obj-$(CONFIG_DRIVER_NET_IXP4XX_ETH)	+= ixp4xx_eth.o
 obj-$(CONFIG_DRIVER_NET_TSE)		+= altera_tse.o
 obj-$(CONFIG_DRIVER_NET_KS8851_MLL)	+= ks8851_mll.o
 obj-$(CONFIG_DRIVER_NET_DESIGNWARE)	+= designware.o
diff --git a/drivers/net/ixp4xx_eth.c b/drivers/net/ixp4xx_eth.c
new file mode 100644
index 0000000..1ae37f1
--- /dev/null
+++ b/drivers/net/ixp4xx_eth.c
@@ -0,0 +1,741 @@
+/*
+ * Intel IXP4xx Ethernet driver for barebox
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc at pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Ethernet port config (0x00 is not present on IXP42X):
+ *
+ * logical port		0x00		0x10		0x20
+ * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
+ * physical port	2		0		1
+ * RX queue (variable)	20		21		22
+ * TX queue		23		24		25
+ * RX-free queue	26		27		28
+ * TX-done queue is always 31, per-port RX queue is configurable
+ *
+ *
+ * Queue entries:
+ * bits 0 -> 1  - NPE ID (RX and TX-done)
+ * bits 0 -> 2  - priority (TX, per 802.1D)
+ * bits 3 -> 4  - port ID (user-set?)
+ * bits 5 -> 31 - physical descriptor address
+ */
+
+#include <common.h>
+#include <init.h>
+#include <malloc.h>
+#include <net.h>
+#include <errno.h>
+#include <asm/mmu.h>
+#include <linux/mii.h>
+#include <mach/ixp4xx-regs.h>
+#include <mach/platform.h>
+#include <mach/cpu.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define DEBUG_DESC		0
+#define DEBUG_RX		0
+#define DEBUG_TX		0
+#define DEBUG_PKT_BYTES		0
+#define DEBUG_MDIO		0
+#define DEBUG_OPEN		0
+#define DEBUG_CLOSE		0
+
+#define RX_DESCS		16 /* also length of all RX queues */
+#define TX_DESCS		16 /* also length of all TX queues */
+#define TXDONE_QUEUE_LEN	16 /* dwords */
+
+#define MAX_MRU			1536 /* 0x600 */
+#define RX_BUFF_SIZE		MAX_MRU
+
+#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
+#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */
+#define ETH_ALEN		6
+
+#define PHYSICAL_ID(port)	(((port)->npe->id + 2) % 3)
+#define LOGICAL_ID(port)	((port)->npe->id << 4)
+#define RX_QUEUE(port)		((port)->npe->id + 20) /* can be changed */
+#define TX_QUEUE(port)		((port)->npe->id + 23)
+#define RXFREE_QUEUE(port)	((port)->npe->id + 26)
+#define TXDONE_QUEUE		31
+
+/* TX Control Registers */
+#define TX_CNTRL0_TX_EN		0x01
+#define TX_CNTRL0_HALFDUPLEX	0x02
+#define TX_CNTRL0_RETRY		0x04
+#define TX_CNTRL0_PAD_EN	0x08
+#define TX_CNTRL0_APPEND_FCS	0x10
+#define TX_CNTRL0_2DEFER	0x20
+#define TX_CNTRL0_RMII		0x40 /* reduced MII */
+#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */
+
+/* RX Control Registers */
+#define RX_CNTRL0_RX_EN		0x01
+#define RX_CNTRL0_PADSTRIP_EN	0x02
+#define RX_CNTRL0_SEND_FCS	0x04
+#define RX_CNTRL0_PAUSE_EN	0x08
+#define RX_CNTRL0_LOOP_EN	0x10
+#define RX_CNTRL0_ADDR_FLTR_EN	0x20
+#define RX_CNTRL0_RX_RUNT_EN	0x40
+#define RX_CNTRL0_BCAST_DIS	0x80
+#define RX_CNTRL1_DEFER_EN	0x01
+
+/* Core Control Register */
+#define CORE_RESET		0x01
+#define CORE_RX_FIFO_FLUSH	0x02
+#define CORE_TX_FIFO_FLUSH	0x04
+#define CORE_SEND_JAM		0x08
+#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */
+
+#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |       \
+				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
+				 TX_CNTRL0_2DEFER)
+#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
+#define DEFAULT_CORE_CNTRL	CORE_MDC_EN
+
+
+/* NPE message codes */
+#define NPE_GETSTATUS			 0x00
+#define NPE_EDB_SETPORTADDRESS		 0x01
+#define NPE_EDB_GETMACADDRESSDATABASE	 0x02
+#define NPE_EDB_SETMACADDRESSSDATABASE	 0x03
+#define NPE_GETSTATS			 0x04
+#define NPE_RESETSTATS			 0x05
+#define NPE_SETMAXFRAMELENGTHS		 0x06
+#define NPE_VLAN_SETRXTAGMODE		 0x07
+#define NPE_VLAN_SETDEFAULTRXVID	 0x08
+#define NPE_VLAN_SETPORTVLANTABLEENTRY	 0x09
+#define NPE_VLAN_SETPORTVLANTABLERANGE	 0x0A
+#define NPE_VLAN_SETRXQOSENTRY		 0x0B
+#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
+#define NPE_STP_SETBLOCKINGSTATE	 0x0D
+#define NPE_FW_SETFIREWALLMODE		 0x0E
+#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
+#define NPE_PC_SETAPMACTABLE		 0x11
+#define NPE_SETLOOPBACK_MODE		 0x12
+#define NPE_PC_SETBSSIDTABLE		 0x13
+#define NPE_ADDRESS_FILTER_CONFIG	 0x14
+#define NPE_APPENDFCSCONFIG		 0x15
+#define NPE_NOTIFY_MAC_RECOVERY_DONE	 0x16
+#define NPE_MAC_RECOVERY_START		 0x17
+
+struct eth_regs {
+	u32 tx_control[2], __res1[2];		 /* 000 */
+	u32 rx_control[2], __res2[2];		 /* 010 */
+	u32 random_seed, __res3[3];		 /* 020 */
+	u32 partial_empty_threshold, __res4;	 /* 030 */
+	u32 partial_full_threshold, __res5;	 /* 038 */
+	u32 tx_start_bytes, __res6[3];		 /* 040 */
+	u32 tx_deferral, rx_deferral, __res7[2]; /* 050 */
+	u32 tx_2part_deferral[2], __res8[2];	 /* 060 */
+	u32 slot_time, __res9[3];		 /* 070 */
+	u32 mdio_command[4];			 /* 080 */
+	u32 mdio_status[4];			 /* 090 */
+	u32 mcast_mask[6], __res10[2];		 /* 0A0 */
+	u32 mcast_addr[6], __res11[2];		 /* 0C0 */
+	u32 int_clock_threshold, __res12[3];	 /* 0E0 */
+	u32 hw_addr[6], __res13[61];		 /* 0F0 */
+	u32 core_control;			 /* 1FC */
+};
+
+/* NPE message structure */
+struct msg {
+	u8 cmd, eth_id, params[6];
+};
+
+/* Ethernet packet descriptor, 32 bytes */
+struct desc {
+	u8 *next;    /* pointer to next buffer, unused */
+
+	u16 buf_len; /* buffer length */
+	u16 pkt_len; /* packet length */
+	u8 *data;    /* pointer to data buffer in RAM */
+	u8 dest_id;
+	u8 src_id;
+	u16 flags;
+	u8 qos;
+	u8 padlen;
+	u16 vlan_tci;
+
+	u8 dst_mac[ETH_ALEN], src_mac[ETH_ALEN];
+};
+
+struct io {
+	struct desc rx_desc_tab[RX_DESCS]; /* alignment: 0x10 */
+	struct desc tx_desc_tab[TX_DESCS];
+	u8 rx_buff_tab[RX_DESCS][MAX_MRU];
+	u8 tx_buff_tab[TX_DESCS][MAX_MRU];
+};
+
+struct port {
+	struct io *io;
+	struct eth_regs *regs;
+	struct npe *npe;
+	u8 firmware[4];
+	struct eth_plat_info *pinfo;
+	struct mii_bus mii_bus;
+	struct eth_device eth;
+};
+
+static struct eth_regs *mdio_regs; /* mdio command and status only */
+
+static int ixp4xx_mdio_cmd(int write, const struct device_d *dev, unsigned char phy_id,
+			   unsigned char location, unsigned short value)
+{
+	int cycles = 0;
+
+	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
+		fprintf(stderr, "%s%d: MII not ready to transmit\n", dev->name, dev->id);
+		return -1;
+	}
+
+	if (write) {
+		__raw_writel(value & 0xFF, &mdio_regs->mdio_command[0]);
+		__raw_writel(value >> 8, &mdio_regs->mdio_command[1]);
+	}
+	__raw_writel(((phy_id << 5) | location) & 0xFF,
+		     &mdio_regs->mdio_command[2]);
+	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
+		     &mdio_regs->mdio_command[3]);
+
+	while ((cycles < MAX_MDIO_RETRIES) &&
+	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
+		udelay(1);
+		cycles++;
+	}
+
+	if (cycles == MAX_MDIO_RETRIES) {
+		fprintf(stderr, "%s%d: MII write failed\n", dev->name, dev->id);
+		return -1;
+	}
+
+#if DEBUG_MDIO
+	fprintf(stderr, "%s%d: mdio_%s() took %i cycles\n", dev->name, dev->id,
+		write ? "write" : "read", cycles);
+#endif
+
+	if (write)
+		return 0;
+
+	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
+#if DEBUG_MDIO
+		fprintf(stderr, "%s%d: MII read failed\n", dev->name, dev->id);
+#endif
+		return -1;
+	}
+
+	value = (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
+#if DEBUG_MDIO
+	fprintf(stderr, "%s%d: MII read [%i] -> 0x%X\n", dev->name, dev->id, location, value);
+#endif
+
+	return value;
+}
+
+static int ixp4xx_mdio_read(struct mii_bus *mii, int phy_id, int location)
+{
+	int ret = ixp4xx_mdio_cmd(0, &mii->dev, phy_id, location, 0);
+	return ret;
+}
+
+static int ixp4xx_mdio_write(struct mii_bus *mii, int phy_id, int location, u16 value)
+{
+	int ret = ixp4xx_mdio_cmd(1, &mii->dev, phy_id, location, value);
+#if DEBUG_MDIO
+	fprintf(stderr, "%s%d: MII write [%i] <- 0x%X, err = %i\n",
+		mii->dev.name, mii->dev.id, location, value, ret);
+#endif
+	return ret;
+}
+
+static void ixp4xx_adjust_link(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+
+	if (dev->phydev->duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+}
+
+static inline void debug_pkt(struct eth_device *dev, const char *func,
+			     u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+	int i;
+
+	fprintf(stderr, "%s%d: %s(%4i) ", dev->dev.name, dev->dev.id, func, len);
+	for (i = 0; i < len; i++) {
+		if (i >= DEBUG_PKT_BYTES)
+			break;
+		fprintf(stderr, "%s%02X",
+			((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
+			data[i]);
+	}
+	fprintf(stderr, "\n");
+#endif
+}
+
+
+static inline void debug_desc(struct desc *desc)
+{
+#if DEBUG_DESC
+	fprintf(stderr, "%07X: %X %3X %3X %07X %2X < %2X %4X %X"
+		" %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
+		(u32)desc, (u32)desc->next, desc->buf_len, desc->pkt_len,
+		(u32)desc->data, desc->dest_id, desc->src_id, desc->flags,
+		desc->qos, desc->padlen, desc->vlan_tci,
+		desc->dst_mac[0], desc->dst_mac[1], desc->dst_mac[2],
+		desc->dst_mac[3], desc->dst_mac[4], desc->dst_mac[5],
+		desc->src_mac[0], desc->src_mac[1], desc->src_mac[2],
+		desc->src_mac[3], desc->src_mac[4], desc->src_mac[5]);
+#endif
+}
+
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+				 int is_tx)
+{
+	u32 addr, n;
+	struct desc *tab;
+
+	if (!(addr = qmgr_get_entry(queue)))
+		return -1;
+
+	addr &= ~0x1F; /* mask out non-address bits */
+	tab = is_tx ? port->io->tx_desc_tab : port->io->rx_desc_tab;
+	n = (addr - (u32)tab) / sizeof(struct desc);
+	BUG_ON(n >= (is_tx ? TX_DESCS : RX_DESCS));
+	debug_desc((struct desc*)addr);
+	BUG_ON(tab[n].next);
+	return n;
+}
+
+static inline void queue_put_desc(unsigned int queue, struct desc *desc)
+{
+	debug_desc(desc);
+	BUG_ON(((u32)desc) & 0x1F);
+	qmgr_put_entry(queue, (u32)desc);
+	/* Don't check for queue overflow here, we've allocated sufficient
+	   length and queues >= 32 don't support this check anyway. */
+}
+
+
+static int ixp4xx_eth_poll(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct desc *desc;
+	u8 *buff;
+	int n, len;
+
+#if DEBUG_RX
+	fprintf(stderr, "%s%d: eth_poll\n", dev->dev.name, dev->dev.id);
+#endif
+
+	if ((n = queue_get_desc(RX_QUEUE(port), port, 0)) < 0) {
+#if DEBUG_RX
+		fprintf(stderr, "%s%d: eth_poll = no packet received\n", dev->dev.name, dev->dev.id);
+#endif
+		return 0;
+	}
+
+	barrier();
+	desc = &port->io->rx_desc_tab[n];
+	buff = port->io->rx_buff_tab[n];
+	len = desc->pkt_len;
+	/* process received frame */
+	memcpy((void *)NetRxPackets[0], buff, len);
+	debug_pkt(dev, "RX", desc->data, len);
+
+	/* put the new buffer on RX-free queue */
+	desc->buf_len = MAX_MRU;
+	desc->pkt_len = 0;
+	queue_put_desc(RXFREE_QUEUE(port), desc);
+
+	net_receive(NetRxPackets[0], len);
+
+#if DEBUG_RX
+	fprintf(stderr, "%s%d: eth_poll end\n", dev->dev.name, dev->dev.id);
+#endif
+	return 0;
+}
+
+
+static int ixp4xx_eth_xmit(struct eth_device *dev, void *data, int len)
+{
+	struct port *port = dev->priv;
+	int n;
+	struct desc *desc;
+
+#if DEBUG_TX
+	fprintf(stderr, "%s%d: eth_xmit\n", dev->dev.name, dev->dev.id);
+#endif
+
+	if (unlikely(len > MAX_MRU)) /* frame too long for the TX buffer */
+		return -1;
+
+	debug_pkt(dev, "TX", data, len);
+
+	if ((n = queue_get_desc(TXDONE_QUEUE, port, 1)) < 0)
+		return -1; /* no free buffers */
+	desc = &port->io->tx_desc_tab[n];
+	desc->data = port->io->tx_buff_tab[n];
+	desc->buf_len = desc->pkt_len = len;
+	memcpy(desc->data, data, len);
+
+	/* NPE firmware pads short frames with zeros internally */
+	// wmb();
+	barrier();
+	queue_put_desc(TX_QUEUE(port), desc);
+
+#if DEBUG_TX
+	fprintf(stderr, "%s%d: eth_xmit end\n", dev->dev.name, dev->dev.id);
+#endif
+	return 0;
+}
+
+static void request_queues(struct port *port, struct eth_device *dev)
+{
+	qmgr_request_queue(RXFREE_QUEUE(port), RX_DESCS, 0, 0, "%s:RX-free", dev->dev.name);
+	qmgr_request_queue(RX_QUEUE(port), RX_DESCS, 0, 0, "%s:RX", dev->dev.name);
+	qmgr_request_queue(TX_QUEUE(port), TX_DESCS, 0, 0, "%s:TX", dev->dev.name);
+
+	/* Common TX-done queue handles buffers sent out by the NPEs */
+	qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+			   "%s:TX-done", dev->dev.name);
+}
+
+static void release_queues(struct port *port)
+{
+	qmgr_release_queue(RXFREE_QUEUE(port));
+	qmgr_release_queue(RX_QUEUE(port));
+	qmgr_release_queue(TX_QUEUE(port));
+	qmgr_release_queue(TXDONE_QUEUE);
+}
+
+static void init_queues(struct port *port)
+{
+	int i;
+
+	memset(port->io->tx_desc_tab, 0, sizeof(port->io->tx_desc_tab)); /* descs */
+	memset(port->io->rx_desc_tab, 0, sizeof(port->io->rx_desc_tab));
+
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct desc *desc = &port->io->rx_desc_tab[i];
+		desc->buf_len = MAX_MRU;
+		desc->data = port->io->rx_buff_tab[i];
+	}
+}
+
+static int ixp4xx_eth_open(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct npe *npe = port->npe;
+	struct msg msg;
+	int i, err;
+
+#if DEBUG_OPEN
+	fprintf(stderr, "%s%d: opening %p\n", dev->dev.name, dev->dev.id, dev);
+#endif
+
+	if (!npe_running(npe)) {
+		err = npe_load_firmware(npe);
+		if (err)
+			return err;
+
+		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
+			fprintf(stderr, "%s%d: %s not responding\n", dev->dev.name, dev->dev.id, npe->name);
+			return -EIO;
+		}
+		memcpy(port->firmware, msg.params + 2, 4);
+	}
+
+	err = phy_device_connect(dev, &port->mii_bus, port->pinfo->phy,
+				 ixp4xx_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+	if (err)
+		return err;
+
+	port->io = dma_alloc_coherent(sizeof(*port->io));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
+	msg.eth_id = LOGICAL_ID(port);
+	msg.params[3] = RX_QUEUE(port) | 0x80;
+	msg.params[4] = RX_QUEUE(port) >> 4; /* MSB of offset */
+	msg.params[5] = RX_QUEUE(port) << 4; /* LSB of offset */
+	for (i = 0; i < 8; i++) {
+		msg.params[1] = i;
+		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) {
+			err = -EIO;
+			goto out;
+		}
+	}
+
+	msg.cmd = NPE_EDB_SETPORTADDRESS;
+	msg.eth_id = PHYSICAL_ID(port);
+	memcpy(msg.params, port->pinfo->hwaddr, ETH_ALEN);
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) {
+		err = -EIO;
+		goto out;
+	}
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_FW_SETFIREWALLMODE;
+	msg.eth_id = LOGICAL_ID(port);
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) {
+		err = -EIO;
+		goto out;
+	}
+
+	request_queues(port, dev);
+	init_queues(port);
+
+	for (i = 0; i < ETH_ALEN; i++)
+		__raw_writel(port->pinfo->hwaddr[i], &port->regs->hw_addr[i]);
+	__raw_writel(0x08, &port->regs->random_seed);
+	__raw_writel(0x12, &port->regs->partial_empty_threshold);
+	__raw_writel(0x30, &port->regs->partial_full_threshold);
+	__raw_writel(0x08, &port->regs->tx_start_bytes);
+	__raw_writel(0x15, &port->regs->tx_deferral);
+	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
+	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
+	__raw_writel(0x80, &port->regs->slot_time);
+	__raw_writel(0x01, &port->regs->int_clock_threshold);
+
+	/* Populate queues with buffers, no failure after this point */
+	for (i = 0; i < TX_DESCS; i++)
+		queue_put_desc(TXDONE_QUEUE, &port->io->tx_desc_tab[i]);
+
+	for (i = 0; i < RX_DESCS; i++)
+		queue_put_desc(RXFREE_QUEUE(port), &port->io->rx_desc_tab[i]);
+
+	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
+	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
+	__raw_writel(0, &port->regs->rx_control[1]);
+	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
+
+#if 0
+	qmgr_set_irq(RX_QUEUE(port), QUEUE_IRQ_SRC_NOT_EMPTY,
+		     eth_rx_irq, dev);
+	qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
+		     eth_txdone_irq, NULL);
+	qmgr_enable_irq(TXDONE_QUEUE);
+#endif
+	memset(&msg, 0, sizeof(msg));
+#if DEBUG_OPEN
+	fprintf(stderr, "%s%d opened\n", dev->dev.name, dev->dev.id);
+#endif
+	return 0;
+out:
+	dma_free_coherent(port->io, sizeof(*port->io));
+	port->io = NULL;
+#if DEBUG_OPEN
+	fprintf(stderr, "%s%d open failed (%i)\n", dev->dev.name, dev->dev.id, err);
+#endif
+	return err;
+}
+
+static void ixp4xx_eth_close(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct msg msg;
+	int buffs = RX_DESCS; /* allocated RX buffers */
+	int i;
+
+#if DEBUG_CLOSE
+	fprintf(stderr, "%s%d: closing\n", dev->dev.name, dev->dev.id);
+#endif
+#if 0
+	qmgr_disable_irq(RX_QUEUE(port));
+#endif
+
+	if (!port->io)
+		return; /* already closed */
+
+	while (queue_get_desc(RXFREE_QUEUE(port), port, 0) >= 0)
+		buffs--;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_SETLOOPBACK_MODE;
+	msg.eth_id = LOGICAL_ID(port);
+	msg.params[1] = 1;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
+		fprintf(stderr, "%s%d: unable to enable loopback\n", dev->dev.name, dev->dev.id);
+
+#if DEBUG_CLOSE
+	fprintf(stderr, "%s%d: draining RX queue\n", dev->dev.name, dev->dev.id);
+#endif
+	i = 0;
+	do { /* drain RX buffers */
+		while (queue_get_desc(RX_QUEUE(port), port, 0) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+		if (qmgr_stat_full(TXDONE_QUEUE) && !(i % 10)) {
+			/* we have to inject some packet */
+			struct desc *desc;
+			int n = queue_get_desc(TXDONE_QUEUE, port, 1);
+			BUG_ON(n < 0);
+			desc = &port->io->tx_desc_tab[n];
+			desc->buf_len = desc->pkt_len = 1;
+			//wmb();
+			barrier();
+			queue_put_desc(TX_QUEUE(port), desc);
+		}
+		udelay(1);
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		fprintf(stderr, "%s%d: unable to drain RX queue, %i buffer(s) left in NPE\n",
+			dev->dev.name, dev->dev.id, buffs);
+#if DEBUG_CLOSE
+	if (!buffs)
+		fprintf(stderr, "%s%d: draining RX queue took %i cycles\n", dev->dev.name, dev->dev.id, i);
+#endif
+
+	buffs = TX_DESCS;
+	while (queue_get_desc(TX_QUEUE(port), port, 1) >= 0)
+		buffs--; /* cancel TX */
+
+	i = 0;
+	do {
+		while (queue_get_desc(TXDONE_QUEUE, port, 1) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		fprintf(stderr, "%s%d: unable to drain TX queue, %i buffer(s) left in NPE\n",
+			dev->dev.name, dev->dev.id, buffs);
+#if DEBUG_CLOSE
+	if (!buffs)
+		fprintf(stderr, "%s%d: draining TX queues took %i cycles\n", dev->dev.name, dev->dev.id, i);
+#endif
+
+	msg.params[1] = 0;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
+		fprintf(stderr, "%s%d: unable to disable loopback\n", dev->dev.name, dev->dev.id);
+
+#if 0
+	qmgr_disable_irq(TXDONE_QUEUE);
+#endif
+	release_queues(port);
+	dma_free_coherent(port->io, sizeof(*port->io));
+	port->io = NULL;
+#if DEBUG_CLOSE
+	fprintf(stderr, "%s%d: closed\n", dev->dev.name, dev->dev.id);
+#endif
+}
+
+static int ixp4xx_eth_get_hwaddr(struct eth_device *eth, unsigned char *addr)
+{
+	struct port *port = eth->priv;
+	memcpy(addr, port->pinfo->hwaddr, 6);
+	return 0;
+}
+
+static int ixp4xx_eth_set_hwaddr(struct eth_device *eth, unsigned char *addr)
+{
+	struct port *port = eth->priv;
+	memcpy(port->pinfo->hwaddr, addr, 6);
+	return 0;
+}
+
+static int ixp4xx_eth_init(struct eth_device *eth)
+{
+	struct port *port = eth->priv;
+
+	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
+		     &port->regs->core_control);
+	udelay(50);
+	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
+	udelay(50);
+	return 0;
+}
+
+static int ixp4xx_eth_probe(struct device_d *dev)
+{
+	struct npe *npe;
+	struct port *port;
+	struct eth_plat_info *pinfo = dev->platform_data;
+
+	if (!pinfo) {
+		fprintf(stderr, "ixp4xx_eth: no platform information\n");
+		return -ENODEV;
+	}
+
+	if (!(npe = npe_request(pinfo->npe))) {
+		fprintf(stderr, "ixp4xx_eth: unable to acquire NPE\n");
+		return -ENODEV;
+	}
+
+	port = xmemalign(0x20, sizeof(*port));
+	memset(port, 0, sizeof(*port));
+
+	port->regs = pinfo->regs;
+	port->npe = npe;
+	port->pinfo = pinfo;
+	port->eth.dev.id = -1;
+	port->eth.priv = port;
+	port->eth.init = ixp4xx_eth_init;
+	port->eth.open = ixp4xx_eth_open;
+	port->eth.halt = ixp4xx_eth_close;
+	port->eth.send = ixp4xx_eth_xmit;
+	port->eth.recv = ixp4xx_eth_poll;
+	port->eth.get_ethaddr = ixp4xx_eth_get_hwaddr;
+	port->eth.set_ethaddr = ixp4xx_eth_set_hwaddr;
+
+	port->mii_bus.dev.id = -1;
+	port->mii_bus.read = ixp4xx_mdio_read;
+	port->mii_bus.write = ixp4xx_mdio_write;
+	mdiobus_register(&port->mii_bus);
+	eth_register(&port->eth);
+	dev->priv = port;
+	return 0;
+}
+
+static void ixp4xx_eth_remove(struct device_d *dev)
+{
+	struct port *port = dev->priv;
+	ixp4xx_eth_close(&port->eth);
+	eth_unregister(&port->eth);
+	mdiobus_unregister(&port->mii_bus);
+	free(port);
+}
+
+static struct driver_d ixp4xx_eth_driver = {
+	.name  = "ixp4xx_eth",
+	.probe = ixp4xx_eth_probe,
+	.remove = ixp4xx_eth_remove,
+};
+
+static int __init ixp4xx_eth_module_init(void)
+{
+	if (cpu_is_ixp43x()) {
+		/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
+		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
+			return -ENOSYS;
+		mdio_regs = (struct eth_regs *)IXP4XX_EthC_BASE;
+	} else {
+		/* All MII PHY accesses use NPE-B Ethernet registers */
+		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+			return -ENOSYS;
+		mdio_regs = (struct eth_regs *)IXP4XX_EthB_BASE;
+	}
+
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	platform_driver_register(&ixp4xx_eth_driver);
+	return 0;
+}
+
+device_initcall(ixp4xx_eth_module_init);
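
A side note for review (not part of the patch): the queue entry format
described at the top of ixp4xx_eth.c is simply the 32-byte-aligned physical
descriptor address with metadata in the low bits, which is why
queue_get_desc() can recover the descriptor with "addr &= ~0x1F".  Purely
illustrative helpers, written under that assumption:

static inline u32 queue_entry_pack(u32 desc_phys, unsigned int npe_id)
{
	/* bits 5..31: descriptor address, bits 0..1: NPE id (RX and TX-done) */
	return (desc_phys & ~0x1F) | (npe_id & 0x3);
}

static inline u32 queue_entry_desc_addr(u32 entry)
{
	return entry & ~0x1F;	/* same mask as queue_get_desc() */
}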


