[PATCH 04/10] net: Add support for IXP4xx ethernet
Jean-Christophe PLAGNIOL-VILLARD
plagnioj at jcrosoft.com
Mon Apr 23 03:02:13 EDT 2012
From: Krzysztof Hałasa <khc at pm.waw.pl>
Signed-off-by: Krzysztof Hałasa <khc at pm.waw.pl>
Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD <plagnioj at jcrosoft.com>
---
arch/arm/mach-ixp4xx/devices.c | 35 ++
drivers/net/Kconfig | 8 +
drivers/net/Makefile | 1 +
drivers/net/ixp4xx_eth.c | 699 ++++++++++++++++++++++++++++++++++++++++
4 files changed, 743 insertions(+), 0 deletions(-)
create mode 100644 drivers/net/ixp4xx_eth.c
diff --git a/arch/arm/mach-ixp4xx/devices.c b/arch/arm/mach-ixp4xx/devices.c
index 8a496d1..8fbfd93 100644
--- a/arch/arm/mach-ixp4xx/devices.c
+++ b/arch/arm/mach-ixp4xx/devices.c
@@ -4,6 +4,8 @@
#include <asm/armlinux.h>
#include <asm/io.h>
#include <mach/ixp4xx-regs.h>
+#include <mach/platform.h>
+#include <mach/cpu.h>
#ifdef CONFIG_DRIVER_SERIAL_NS16550
/**
@@ -58,3 +60,36 @@ struct device_d* ixp4xx_add_uart(int id, u32 base)
return NULL;
}
#endif /* CONFIG_DRIVER_SERIAL_NS16550 */
+
+#ifdef CONFIG_DRIVER_NET_IXP4XX_ETH
+/*
+ * Register a built-in IXP4xx Ethernet port as a generic platform device.
+ *
+ * id 1 maps to the NPE-B port (EthB), id 2 to the NPE-C port (EthC).
+ * Returns the registered device, or NULL if the id is unknown or the
+ * corresponding NPE Ethernet feature is fused off.
+ */
+struct device_d* ixp4xx_add_eth(int id, struct eth_plat_info *pdata)
+{
+	resource_size_t start;
+
+	switch (id) {
+	case 1:
+		if (!cpu_is_ixp43x()) {
+			/* All MII PHY accesses use NPE-B Ethernet registers */
+			if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+				return NULL;
+			start = (resource_size_t)IXP4XX_EthB_BASE;
+			break;
+		}
+		/* fall through: on IXP43x, port 1 uses the NPE-C (EthC) block */
+	case 2:
+		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
+			return NULL;
+		start = (resource_size_t)IXP4XX_EthC_BASE;
+		break;
+	default:
+		return NULL;
+	}
+
+	return add_generic_device("ixp4xx_eth", id, NULL, start, 0xfff,
+				  IORESOURCE_MEM, pdata);
+}
+#else
+/* Stub so board code links when the driver is not configured in */
+struct device_d* ixp4xx_add_eth(int id, struct eth_plat_info *pdata)
+{
+	return NULL;
+}
+#endif /* CONFIG_DRIVER_NET_IXP4XX_ETH */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 172cc39..47c86dc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -81,6 +81,14 @@ config DRIVER_NET_MACB
depends on HAS_MACB
select MIIDEV
+config DRIVER_NET_IXP4XX_ETH
+ tristate "Intel IXP4xx Ethernet support"
+ depends on ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ select MIIDEV
+ help
+ Say Y here if you want to use built-in Ethernet ports
+ on IXP4xx processor.
+
config DRIVER_NET_TAP
bool "tap Ethernet driver"
depends on LINUX
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 34dbee9..9f18270 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_MIIDEV) += miidev.o
obj-$(CONFIG_NET_USB) += usb/
obj-$(CONFIG_DRIVER_NET_TSE) += altera_tse.o
obj-$(CONFIG_DRIVER_NET_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_DRIVER_NET_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ixp4xx_eth.c b/drivers/net/ixp4xx_eth.c
new file mode 100644
index 0000000..cacb5d1
--- /dev/null
+++ b/drivers/net/ixp4xx_eth.c
@@ -0,0 +1,699 @@
+/*
+ * Intel IXP4xx Ethernet driver (ported from the Linux driver of the same name)
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc at pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Ethernet port config (0x00 is not present on IXP42X):
+ *
+ * logical port 0x00 0x10 0x20
+ * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
+ * physical port 2 0 1
+ * RX queue (variable) 20 21 22
+ * TX queue 23 24 25
+ * RX-free queue 26 27 28
+ * TX-done queue is always 31, per-port RX queue is configurable
+ *
+ *
+ * Queue entries:
+ * bits 0 -> 1 - NPE ID (RX and TX-done)
+ * bits 0 -> 2 - priority (TX, per 802.1D)
+ * bits 3 -> 4 - port ID (user-set?)
+ * bits 5 -> 31 - physical descriptor address
+ */
+
+#include <common.h>
+#include <init.h>
+#include <malloc.h>
+#include <miidev.h>
+#include <net.h>
+#include <errno.h>
+#include <mach/ixp4xx-regs.h>
+#include <mach/platform.h>
+#include <mach/cpu.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define DEBUG_DESC 0
+#define DEBUG_RX 0
+#define DEBUG_TX 0
+#define DEBUG_PKT_BYTES 0
+#define DEBUG_MDIO 0
+#define DEBUG_OPEN 0
+#define DEBUG_CLOSE 0
+
+#define RX_DESCS 16 /* also length of all RX queues */
+#define TX_DESCS 16 /* also length of all TX queues */
+#define TXDONE_QUEUE_LEN 16 /* dwords */
+
+#define MAX_MRU 1536 /* 0x600 */
+#define RX_BUFF_SIZE MAX_MRU
+
+#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
+#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
+#define ETH_ALEN 6
+
+#define PHYSICAL_ID(port) (((port)->npe->id + 2) % 3)
+#define LOGICAL_ID(port) ((port)->npe->id << 4)
+#define RX_QUEUE(port) ((port)->npe->id + 20) /* can be changed */
+#define TX_QUEUE(port) ((port)->npe->id + 23)
+#define RXFREE_QUEUE(port) ((port)->npe->id + 26)
+#define TXDONE_QUEUE 31
+
+/* TX Control Registers */
+#define TX_CNTRL0_TX_EN 0x01
+#define TX_CNTRL0_HALFDUPLEX 0x02
+#define TX_CNTRL0_RETRY 0x04
+#define TX_CNTRL0_PAD_EN 0x08
+#define TX_CNTRL0_APPEND_FCS 0x10
+#define TX_CNTRL0_2DEFER 0x20
+#define TX_CNTRL0_RMII 0x40 /* reduced MII */
+#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
+
+/* RX Control Registers */
+#define RX_CNTRL0_RX_EN 0x01
+#define RX_CNTRL0_PADSTRIP_EN 0x02
+#define RX_CNTRL0_SEND_FCS 0x04
+#define RX_CNTRL0_PAUSE_EN 0x08
+#define RX_CNTRL0_LOOP_EN 0x10
+#define RX_CNTRL0_ADDR_FLTR_EN 0x20
+#define RX_CNTRL0_RX_RUNT_EN 0x40
+#define RX_CNTRL0_BCAST_DIS 0x80
+#define RX_CNTRL1_DEFER_EN 0x01
+
+/* Core Control Register */
+#define CORE_RESET 0x01
+#define CORE_RX_FIFO_FLUSH 0x02
+#define CORE_TX_FIFO_FLUSH 0x04
+#define CORE_SEND_JAM 0x08
+#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
+
+#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
+ TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
+ TX_CNTRL0_2DEFER)
+#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
+#define DEFAULT_CORE_CNTRL CORE_MDC_EN
+
+
+/* NPE message codes */
+#define NPE_GETSTATUS 0x00
+#define NPE_EDB_SETPORTADDRESS 0x01
+#define NPE_EDB_GETMACADDRESSDATABASE 0x02
+#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
+#define NPE_GETSTATS 0x04
+#define NPE_RESETSTATS 0x05
+#define NPE_SETMAXFRAMELENGTHS 0x06
+#define NPE_VLAN_SETRXTAGMODE 0x07
+#define NPE_VLAN_SETDEFAULTRXVID 0x08
+#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
+#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
+#define NPE_VLAN_SETRXQOSENTRY 0x0B
+#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
+#define NPE_STP_SETBLOCKINGSTATE 0x0D
+#define NPE_FW_SETFIREWALLMODE 0x0E
+#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
+#define NPE_PC_SETAPMACTABLE 0x11
+#define NPE_SETLOOPBACK_MODE 0x12
+#define NPE_PC_SETBSSIDTABLE 0x13
+#define NPE_ADDRESS_FILTER_CONFIG 0x14
+#define NPE_APPENDFCSCONFIG 0x15
+#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
+#define NPE_MAC_RECOVERY_START 0x17
+
+struct eth_regs {
+ u32 tx_control[2], __res1[2]; /* 000 */
+ u32 rx_control[2], __res2[2]; /* 010 */
+ u32 random_seed, __res3[3]; /* 020 */
+ u32 partial_empty_threshold, __res4; /* 030 */
+ u32 partial_full_threshold, __res5; /* 038 */
+ u32 tx_start_bytes, __res6[3]; /* 040 */
+ u32 tx_deferral, rx_deferral, __res7[2]; /* 050 */
+ u32 tx_2part_deferral[2], __res8[2]; /* 060 */
+ u32 slot_time, __res9[3]; /* 070 */
+ u32 mdio_command[4]; /* 080 */
+ u32 mdio_status[4]; /* 090 */
+ u32 mcast_mask[6], __res10[2]; /* 0A0 */
+ u32 mcast_addr[6], __res11[2]; /* 0C0 */
+ u32 int_clock_threshold, __res12[3]; /* 0E0 */
+ u32 hw_addr[6], __res13[61]; /* 0F0 */
+ u32 core_control; /* 1FC */
+};
+
+/* NPE message structure */
+struct msg {
+ u8 cmd, eth_id, params[6];
+};
+
+/* Ethernet packet descriptor, 32 bytes */
+struct desc {
+ u8 *next; /* pointer to next buffer, unused */
+
+ u16 buf_len; /* buffer length */
+ u16 pkt_len; /* packet length */
+ u8 *data; /* pointer to data buffer in RAM */
+ u8 dest_id;
+ u8 src_id;
+ u16 flags;
+ u8 qos;
+ u8 padlen;
+ u16 vlan_tci;
+
+ u8 dst_mac[ETH_ALEN], src_mac[ETH_ALEN];
+};
+
+struct port {
+ struct desc rx_desc_tab[RX_DESCS]; /* alignment: 0x10 */
+ struct desc tx_desc_tab[TX_DESCS];
+ u8 *buff_tab;
+ struct eth_regs *regs;
+ struct npe *npe;
+ u8 firmware[4];
+ struct eth_plat_info *pinfo;
+ struct mii_device miidev;
+ struct eth_device eth;
+};
+
+#define rx_buff(port, n) ((port)->buff_tab + MAX_MRU * (n))
+#define tx_buff(port, n) ((port)->buff_tab + MAX_MRU * (RX_DESCS + (n)))
+
+/*
+ * Issue one MDIO read or write through the NPE-B Ethernet MDIO
+ * registers and busy-wait for completion.
+ *
+ * Returns the 16-bit value read (>= 0) for reads, 0 for a successful
+ * write, -1 on timeout or MII error.
+ */
+static int ixp4xx_mdio_cmd(const struct mii_device *mii, int write, unsigned char phy_id,
+			   unsigned char location, unsigned short value)
+{
+	int cycles = 0;
+	struct port *port = mii->edev->priv;
+	struct eth_regs *mdio_regs = port->regs;
+
+	/* GO bit (0x80) still set: a previous command is in flight */
+	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
+		dev_err(&mii->dev, "MII not ready to transmit\n");
+		return -1;
+	}
+
+	if (write) {
+		__raw_writel(value & 0xFF, &mdio_regs->mdio_command[0]);
+		__raw_writel(value >> 8, &mdio_regs->mdio_command[1]);
+	}
+	__raw_writel(((phy_id << 5) | location) & 0xFF,
+		     &mdio_regs->mdio_command[2]);
+	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
+		     &mdio_regs->mdio_command[3]);
+
+	/* poll until the controller clears the GO bit */
+	while ((cycles < MAX_MDIO_RETRIES) &&
+	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
+		udelay(1);
+		cycles++;
+	}
+
+	if (cycles == MAX_MDIO_RETRIES) {
+		/* was unconditionally "MII write failed" even for reads;
+		 * report the direction that actually timed out */
+		dev_err(&mii->dev, "MII %s failed\n", write ? "write" : "read");
+		return -1;
+	}
+
+	dev_dbg(&mii->dev, "mdio_%s() took %i cycles\n",
+		write ? "write" : "read", cycles);
+
+	if (write)
+		return 0;
+
+	/* status bit 7 set: the PHY did not answer the read */
+	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
+		dev_dbg(&mii->dev, "MII read failed\n");
+		return -1;
+	}
+
+	value = (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
+	dev_dbg(&mii->dev, "MII read [%i] -> 0x%X\n", location, value);
+
+	return value;
+}
+
+static int ixp4xx_mdio_read(struct mii_device *mii, int phy_id, int location)
+{
+ int ret = ixp4xx_mdio_cmd(mii, 0, phy_id, location, 0);
+
+ dev_dbg(&mii->dev, "MII read [%i], read = %i\n", location, ret);
+
+ return ret;
+}
+
+static int ixp4xx_mdio_write(struct mii_device *mii, int phy_id, int location, int value)
+{
+ int ret = ixp4xx_mdio_cmd(mii, 1, phy_id, location, value);
+
+ dev_dbg(&mii->dev, "MII write [%i] <- 0x%X, err = %i\n", location, value, ret);
+
+ return ret;
+}
+
+/*
+ * Wait for autonegotiation, then program the MAC's duplex setting to
+ * match the PHY. Returns 0 on success or the negative MII error.
+ */
+static int ixp4xx_adjust_link(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	int status, speed, duplex;
+
+	miidev_wait_aneg(&port->miidev);
+	status = miidev_get_status(&port->miidev);
+
+	/* check for an MII error before interpreting the status bits
+	 * (the original decoded duplex/speed from a possibly-negative
+	 * error value first) */
+	if (status < 0)
+		goto err_out;
+
+	duplex = status & MIIDEV_STATUS_IS_FULL_DUPLEX ? 1 : 0;
+	speed = status & MIIDEV_STATUS_IS_100MBIT ? 100 : 10;
+
+	if (duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+
+	/* informational message, not an error */
+	dev_info(&dev->dev, "link up, speed %u Mb/s, %s duplex\n",
+		 speed, duplex ? "full" : "half");
+	return 0;
+
+err_out:
+	dev_err(&dev->dev, "failed to read MII data\n");
+	return status;
+}
+
+#if DEBUG_PKT_BYTES
+/* Dump up to DEBUG_PKT_BYTES bytes of a frame; spaces are inserted
+ * after the destination MAC (6), source MAC (12) and ethertype (14)
+ * to make the Ethernet header readable. */
+static inline void debug_pkt(struct eth_device *dev, const char *func,
+			     u8 *data, int len)
+{
+	int i;
+
+	dev_dbg(&dev->dev, "%s(%4i) ", func, len);
+	for (i = 0; i < len; i++) {
+		if (i >= DEBUG_PKT_BYTES)
+			break;
+		dev_dbg(&dev->dev, "%s%02X",
+			((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
+			data[i]);
+	}
+	dev_dbg(&dev->dev, "\n");
+}
+#else
+/* No-op when packet dumping is compiled out */
+static inline void debug_pkt(struct eth_device *dev, const char *func,
+			     u8 *data, int len)
+{}
+#endif
+
+#if DEBUG_DESC
+/* Dump all fields of a packet descriptor to stderr */
+static inline void debug_desc(struct desc *desc)
+{
+	fprintf(stderr, "%07X: %X %3X %3X %07X %2X < %2X %4X %X"
+		" %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
+		(u32)desc, (u32)desc->next, desc->buf_len, desc->pkt_len,
+		(u32)desc->data, desc->dest_id, desc->src_id, desc->flags,
+		desc->qos, desc->padlen, desc->vlan_tci,
+		desc->dst_mac[0], desc->dst_mac[1], desc->dst_mac[2],
+		desc->dst_mac[3], desc->dst_mac[4], desc->dst_mac[5],
+		desc->src_mac[0], desc->src_mac[1], desc->src_mac[2],
+		desc->src_mac[3], desc->src_mac[4], desc->src_mac[5]);
+}	/* closing brace was missing: DEBUG_DESC=1 did not compile */
+#else
+static inline void debug_desc(struct desc *desc)
+{
+}
+#endif
+
+/*
+ * Pop one entry from a hardware queue and translate the descriptor
+ * address it carries into an index into this port's RX or TX
+ * descriptor table. Returns the index, or -1 if the queue is empty.
+ */
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+				 int is_tx)
+{
+	u32 addr, n;
+	struct desc *tab;
+
+	if (!(addr = qmgr_get_entry(queue)))
+		return -1;
+
+	addr &= ~0x1F; /* mask out non-address bits */
+	tab = is_tx ? port->tx_desc_tab : port->rx_desc_tab;
+	/* sanity: the address must fall inside our own descriptor table */
+	n = (addr - (u32)tab) / sizeof(struct desc);
+	BUG_ON(n >= (is_tx ? TX_DESCS : RX_DESCS));
+	debug_desc((struct desc*)addr);
+	BUG_ON(tab[n].next);
+	return n;
+}
+
+/*
+ * Hand a descriptor to the queue manager. The descriptor must be
+ * 32-byte aligned: the low 5 bits of a queue entry carry metadata,
+ * not address bits (see the entry format in the file header).
+ */
+static inline void queue_put_desc(unsigned int queue, struct desc *desc)
+{
+	debug_desc(desc);
+	BUG_ON(((u32)desc) & 0x1F);
+	qmgr_put_entry(queue, (u32)desc);
+	/* Don't check for queue overflow here, we've allocated sufficient
+	   length and queues >= 32 don't support this check anyway. */
+}
+
+/*
+ * Receive poll: fetch at most one completed frame from the per-port
+ * RX queue, copy it out, hand it to the network stack and return the
+ * buffer to the RX-free queue. Always returns 0.
+ */
+static int ixp4xx_eth_poll(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct desc *desc;
+	u8 *buff;
+	int n, len;
+
+	dev_dbg(&dev->dev, "eth_poll\n");
+
+	if ((n = queue_get_desc(RX_QUEUE(port), port, 0)) < 0) {
+		dev_dbg(&dev->dev, "eth_poll = no packet receive\n");
+		return 0;
+	}
+
+	barrier();
+	desc = &port->rx_desc_tab[n];
+	buff = rx_buff(port, n);
+	len = desc->pkt_len;
+	/* process received frame */
+	/* NOTE(review): len can be up to MAX_MRU (1536); confirm the
+	   NetRxPackets[] buffers are at least that large */
+	memcpy((void *)NetRxPackets[0], buff, len);
+	debug_pkt(dev, "RX", desc->data, len);
+
+	/* put the new buffer on RX-free queue */
+	desc->buf_len = MAX_MRU;
+	desc->pkt_len = 0;
+	queue_put_desc(RXFREE_QUEUE(port), desc);
+
+	/* safe to recycle the descriptor above: the frame was copied out */
+	net_receive(NetRxPackets[0], len);
+
+	dev_dbg(&dev->dev, "eth_poll end\n");
+	return 0;
+}
+
+
+/*
+ * Transmit one frame: recycle a descriptor from the shared TX-done
+ * queue, copy the frame into its buffer and queue it for the NPE.
+ * Returns 0 on success, -1 if the frame is too long or no descriptor
+ * is free.
+ */
+static int ixp4xx_eth_xmit(struct eth_device *dev, void *data, int len)
+{
+	struct port *port = dev->priv;
+	int n;
+	struct desc *desc;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	/* reject frames longer than the standard Ethernet payload */
+	if (unlikely(len > 1500))
+		return -1;
+
+	debug_pkt(dev, "TX", data, len);
+
+	if ((n = queue_get_desc(TXDONE_QUEUE, port, 1)) < 0)
+		return -1; /* no free buffers */
+	desc = &port->tx_desc_tab[n];
+	desc->data = tx_buff(port, n);
+	desc->buf_len = desc->pkt_len = len;
+	memcpy(desc->data, data, len);
+
+	/* NPE firmware pads short frames with zeros internally */
+	// wmb();
+	barrier();
+	queue_put_desc(TX_QUEUE(port), desc);
+
+	dev_dbg(&dev->dev, "%s end\n", __func__);
+	return 0;
+}
+
+/* Acquire the hardware queues used by this port, plus the shared
+ * TX-done queue that collects buffers sent out by the NPEs. */
+static void request_queues(struct port *port, struct eth_device *dev)
+{
+	const char *name = dev->dev.name;
+
+	qmgr_request_queue(RXFREE_QUEUE(port), RX_DESCS, 0, 0, "%s:RX-free", name);
+	qmgr_request_queue(RX_QUEUE(port), RX_DESCS, 0, 0, "%s:RX", name);
+	qmgr_request_queue(TX_QUEUE(port), TX_DESCS, 0, 0, "%s:TX", name);
+	qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+			   "%s:TX-done", name);
+}
+
+/* Release every queue acquired in request_queues() */
+static void release_queues(struct port *port)
+{
+	qmgr_release_queue(RXFREE_QUEUE(port));
+	qmgr_release_queue(RX_QUEUE(port));
+	qmgr_release_queue(TX_QUEUE(port));
+	qmgr_release_queue(TXDONE_QUEUE);
+}
+
+/* Zero both descriptor tables and attach a receive buffer of MAX_MRU
+ * bytes to every RX descriptor. */
+static void init_queues(struct port *port)
+{
+	struct desc *rxd;
+	int n;
+
+	memset(port->tx_desc_tab, 0, sizeof(port->tx_desc_tab)); /* descs */
+	memset(port->rx_desc_tab, 0, sizeof(port->rx_desc_tab));
+
+	/* Setup RX buffers */
+	for (n = 0; n < RX_DESCS; n++) {
+		rxd = &port->rx_desc_tab[n];
+		rxd->buf_len = MAX_MRU;
+		rxd->data = rx_buff(port, n);
+	}
+}
+
+/*
+ * Bring the port up: load/verify the NPE firmware, configure the link,
+ * allocate the packet buffers, program the NPE (RX queue mapping, MAC
+ * address, firewall mode) and the MAC registers, then prime the
+ * TX-done and RX-free queues with descriptors.
+ * Returns 0 on success or a negative error code.
+ */
+static int ixp4xx_eth_open(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct npe *npe = port->npe;
+	struct msg msg;
+	int i, err;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	if (!npe_running(npe)) {
+		err = npe_load_firmware(npe);
+		if (err)
+			return err;
+
+		/* the freshly started firmware announces itself; stash
+		 * its version bytes for reference */
+		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
+			dev_err(&dev->dev, " %s not responding\n", npe->name);
+			return -EIO;
+		}
+		memcpy(port->firmware, msg.params + 2, 4);
+	}
+
+	err = ixp4xx_adjust_link(dev);
+	if (err)
+		return err;
+
+	/* one contiguous buffer area for all RX and TX descriptors */
+	port->buff_tab = xmalloc((RX_DESCS + TX_DESCS) * MAX_MRU);
+
+	/* map every 802.1p priority class (0-7) to our RX queue */
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
+	msg.eth_id = LOGICAL_ID(port);
+	msg.params[3] = RX_QUEUE(port) | 0x80;
+	msg.params[4] = RX_QUEUE(port) >> 4; /* MSB of offset */
+	msg.params[5] = RX_QUEUE(port) << 4; /* LSB of offset */
+	for (i = 0; i < 8; i++) {
+		msg.params[1] = i;
+		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) {
+			err = -EIO;
+			goto out;
+		}
+	}
+
+	/* program the port's MAC address into the NPE */
+	msg.cmd = NPE_EDB_SETPORTADDRESS;
+	msg.eth_id = PHYSICAL_ID(port);
+	memcpy(msg.params, port->pinfo->hwaddr, ETH_ALEN);
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) {
+		err = -EIO;
+		goto out;
+	}
+
+	/* firewall mode command with all parameters zeroed */
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_FW_SETFIREWALLMODE;
+	msg.eth_id = LOGICAL_ID(port);
+	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) {
+		err = -EIO;
+		goto out;
+	}
+
+	request_queues(port, dev);
+	init_queues(port);
+
+	/* MAC register setup; NOTE(review): magic values match the
+	   upstream Linux ixp4xx_eth driver defaults */
+	for (i = 0; i < ETH_ALEN; i++)
+		__raw_writel(port->pinfo->hwaddr[i], &port->regs->hw_addr[i]);
+	__raw_writel(0x08, &port->regs->random_seed);
+	__raw_writel(0x12, &port->regs->partial_empty_threshold);
+	__raw_writel(0x30, &port->regs->partial_full_threshold);
+	__raw_writel(0x08, &port->regs->tx_start_bytes);
+	__raw_writel(0x15, &port->regs->tx_deferral);
+	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
+	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
+	__raw_writel(0x80, &port->regs->slot_time);
+	__raw_writel(0x01, &port->regs->int_clock_threshold);
+
+	/* Populate queues with buffers, no failure after this point */
+	for (i = 0; i < TX_DESCS; i++)
+		queue_put_desc(TXDONE_QUEUE, &port->tx_desc_tab[i]);
+
+	for (i = 0; i < RX_DESCS; i++)
+		queue_put_desc(RXFREE_QUEUE(port), &port->rx_desc_tab[i]);
+
+	/* enable TX before RX so we never receive with TX disabled */
+	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
+	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
+	__raw_writel(0, &port->regs->rx_control[1]);
+	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
+
+	memset(&msg, 0, sizeof(msg));
+	dev_dbg(&dev->dev, "%s opened\n", __func__);
+	return 0;
+out:
+	/* buff_tab == NULL doubles as the "port closed" marker */
+	free(port->buff_tab);
+	port->buff_tab = NULL;
+	dev_dbg(&dev->dev, "%s open failed (%i)\n", __func__, err);
+	return err;
+}
+
+/*
+ * Shut the port down and reclaim every descriptor from the NPE.
+ *
+ * The NPE holds on to RX buffers until it has a frame to put in them,
+ * so the port is switched to loopback and dummy frames are injected
+ * until all RX descriptors have cycled back to us. TX descriptors are
+ * then drained from the TX and TX-done queues. Finally the queues are
+ * released and the buffer area freed.
+ */
+static void ixp4xx_eth_close(struct eth_device *dev)
+{
+	struct port *port = dev->priv;
+	struct msg msg;
+	int buffs = RX_DESCS; /* allocated RX buffers */
+	int i;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	if (!port->buff_tab)
+		return; /* already closed */
+
+	/* reclaim buffers still sitting unused on the RX-free queue */
+	while (queue_get_desc(RXFREE_QUEUE(port), port, 0) >= 0)
+		buffs--;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = NPE_SETLOOPBACK_MODE;
+	msg.eth_id = LOGICAL_ID(port);
+	msg.params[1] = 1;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
+		fprintf(stderr, "%s%d: unable to enable loopback\n", dev->dev.name, dev->dev.id);
+
+	dev_dbg(&dev->dev, "%s: draining RX queue\n", __func__);
+	i = 0;
+	do { /* drain RX buffers */
+		while (queue_get_desc(RX_QUEUE(port), port, 0) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+		if (qmgr_stat_full(TXDONE_QUEUE) && !(i % 10)) {
+			/* we have to inject some packet */
+			struct desc *desc;
+			int n = queue_get_desc(TXDONE_QUEUE, port, 1);
+			BUG_ON(n < 0);
+			desc = &port->tx_desc_tab[n];
+			/* 1-byte dummy frame, looped back into an RX buffer */
+			desc->buf_len = desc->pkt_len = 1;
+			//wmb();
+			barrier();
+			queue_put_desc(TX_QUEUE(port), desc);
+		}
+		udelay(1);
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		fprintf(stderr, "%s%d: unable to drain RX queue, %i buffer(s) left in NPE\n",
+			dev->dev.name, dev->dev.id, buffs);
+	if (!buffs)
+		dev_dbg(&dev->dev, "%s: draining RX queue took %i cycles\n", __func__, i);
+
+	buffs = TX_DESCS;
+	while (queue_get_desc(TX_QUEUE(port), port, 1) >= 0)
+		buffs--; /* cancel TX */
+
+	/* wait for in-flight TX descriptors to land on TX-done */
+	i = 0;
+	do {
+		while (queue_get_desc(TXDONE_QUEUE, port, 1) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		fprintf(stderr, "%s%d: unable to drain TX queue, %i buffer(s) left in NPE\n",
+			dev->dev.name, dev->dev.id, buffs);
+	if (!buffs)
+		dev_dbg(&dev->dev, "%s: draining TX queue took %i cycles\n", __func__, i);
+
+	/* msg still holds the loopback command from above; only the
+	   enable flag is cleared to switch loopback off again */
+	msg.params[1] = 0;
+	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
+		fprintf(stderr, "%s%d: unable to disable loopback\n", dev->dev.name, dev->dev.id);
+
+	release_queues(port);
+	free(port->buff_tab);
+	port->buff_tab = NULL; /* marks the port as closed */
+	dev_dbg(&dev->dev, "%s: closed\n", __func__);
+}
+
+/* Report the MAC address kept in the platform data */
+static int ixp4xx_eth_get_hwaddr(struct eth_device *eth, unsigned char *addr)
+{
+	struct port *port = eth->priv;
+
+	memcpy(addr, port->pinfo->hwaddr, ETH_ALEN);
+	return 0;
+}
+
+/* Store a new MAC address in the platform data; the hardware picks
+ * it up on the next open() */
+static int ixp4xx_eth_set_hwaddr(struct eth_device *eth, unsigned char *addr)
+{
+	struct port *port = eth->priv;
+
+	memcpy(port->pinfo->hwaddr, addr, ETH_ALEN);
+	return 0;
+}
+
+/* Reset the MAC core and kick off PHY autonegotiation */
+static int ixp4xx_eth_init(struct eth_device *eth)
+{
+	struct port *port = eth->priv;
+	struct eth_regs *regs = port->regs;
+
+	/* pulse the core reset bit, then restore the default control word */
+	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET, &regs->core_control);
+	udelay(50);
+	__raw_writel(DEFAULT_CORE_CNTRL, &regs->core_control);
+	udelay(50);
+
+	miidev_restart_aneg(&port->miidev);
+	return 0;
+}
+
+/*
+ * Probe one "ixp4xx_eth" platform device: map the MAC/MDIO register
+ * window, acquire the NPE named in the platform data, then register
+ * both an MII bus and an Ethernet device with barebox.
+ */
+static int ixp4xx_eth_probe(struct device_d *dev)
+{
+	struct npe *npe;
+	struct port *port;
+	struct eth_plat_info *pinfo = dev->platform_data;
+	struct eth_regs *mdio_regs;
+
+	if (!pinfo) {
+		dev_err(dev, "ixp4xx_eth: no platform information\n");
+		return -ENODEV;
+	}
+
+	mdio_regs = dev_request_mem_region(dev, 0);
+	/* DEFAULT_CORE_CNTRL enables the MDC clock so MDIO works */
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	if (!(npe = npe_request(pinfo->npe))) {
+		dev_err(dev, "ixp4xx_eth: unable to acquire NPE\n");
+		return -ENODEV;
+	}
+
+	/* the descriptor tables sit at the start of struct port and must
+	   be 32-byte aligned: queue entries carry descriptor addresses
+	   with the low 5 bits reserved for metadata */
+	port = xmemalign(0x20, sizeof(*port));
+	memset(port, 0, sizeof(*port));
+
+	port->regs = mdio_regs;
+	port->npe = npe;
+	port->pinfo = pinfo;
+	port->eth.dev.id = -1;
+	port->eth.priv = port;
+	port->eth.init = ixp4xx_eth_init;
+	port->eth.open = ixp4xx_eth_open;
+	port->eth.halt = ixp4xx_eth_close;
+	port->eth.send = ixp4xx_eth_xmit;
+	port->eth.recv = ixp4xx_eth_poll;
+	port->eth.get_ethaddr = ixp4xx_eth_get_hwaddr;
+	port->eth.set_ethaddr = ixp4xx_eth_set_hwaddr;
+	port->eth.parent = dev;
+
+	port->miidev.dev.id = -1;
+	port->miidev.read = ixp4xx_mdio_read;
+	port->miidev.write = ixp4xx_mdio_write;
+	port->miidev.address = pinfo->phy;
+	port->miidev.edev = &port->eth;
+	mii_register(&port->miidev);
+	eth_register(&port->eth);
+	return 0;
+}
+
+/* barebox driver binding for devices named "ixp4xx_eth" */
+static struct driver_d ixp4xx_eth_driver = {
+	.name = "ixp4xx_eth",
+	.probe = ixp4xx_eth_probe,
+};
+
+/* register the driver with the device/driver core at init time */
+static int __init ixp4xx_eth_module_init(void)
+{
+	return register_driver(&ixp4xx_eth_driver);
+}
+device_initcall(ixp4xx_eth_module_init);
--
1.7.9.1
More information about the barebox
mailing list