[PATCH V7 02/17] SPEAr13xx: Add PCIe host controller base driver support.
Rob Herring
robherring2 at gmail.com
Tue Apr 12 11:32:21 EDT 2011
On 03/22/2011 11:52 PM, Viresh Kumar wrote:
> From: Pratyush Anand<pratyush.anand at st.com>
>
> SPEAr13xx family contains Synopsys designware PCIe version 3.30a. This
> patch adds support for this PCIe module for spear platform.
>
Sorry for the late response.
Other vendors are likely to have the same IP, but none of the code here
is written to be common. In light of recent discussions this would be a
good candidate to avoid future duplication.
It doesn't appear that pcie.c contains much SPEAr-specific code. The few
defines and structs that are SPEAr-specific could be passed in. Some comments below.
> Changes since V6:
> - Read request size in RC'c PCIE capability is forced to 128 bytes.
> - Max payload is forced to the minimum value of max payload of any of the
> device in tree.
> - Request_resource for IO Space is from ioport_resource now. Earlier it was from
> iomem_resource.
> - Callback for evb is made generic
> - RC is programmed as virtual bridge
> - Support for gen1 initialization as default option
> - code optimization and modification for cfg1 rd/wr
> - Adding valid VID and DID for RC
> - Inbound address range increased to 0-32GB address space
> - Review Comments incorporated. Main changes are:
> - Removed unnecessary define CONFIG_PCI
> - All HW addresses have been modified to __iomem *
> - unnecessary typecasting have been removed
> - all hardware address are being read by readl now
>
> Reviewed-by: Stanley Miao<stanley.miao at windriver.com>
> Signed-off-by: Pratyush Anand<pratyush.anand at st.com>
> Signed-off-by: shiraz hashim<shiraz.hashim at st.com>
> Signed-off-by: Viresh Kumar<viresh.kumar at st.com>
> ---
> arch/arm/Kconfig | 1 +
> arch/arm/mach-spear13xx/Makefile | 1 +
> arch/arm/mach-spear13xx/include/mach/hardware.h | 4 +
> arch/arm/mach-spear13xx/include/mach/irqs.h | 19 +-
> arch/arm/mach-spear13xx/include/mach/pcie.h | 176 ++++
> arch/arm/mach-spear13xx/pcie.c | 1129 +++++++++++++++++++++++
> arch/arm/mach-spear13xx/spear1300_evb.c | 38 +
> arch/arm/mach-spear13xx/spear1310_evb.c | 38 +
> arch/arm/mach-spear13xx/spear13xx.c | 28 +
> arch/arm/plat-spear/Kconfig | 2 +
> 10 files changed, 1435 insertions(+), 1 deletions(-)
> create mode 100644 arch/arm/mach-spear13xx/include/mach/pcie.h
> create mode 100644 arch/arm/mach-spear13xx/pcie.c
>
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index d9efe86..0aca70d 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -1263,6 +1263,7 @@ config PCI_HOST_ITE8152
> select DMABOUNCE
>
> source "drivers/pci/Kconfig"
> +source "drivers/pci/pcie/Kconfig"
>
> source "drivers/pcmcia/Kconfig"
>
> diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
> index 24bbe16..2a113b0 100644
> --- a/arch/arm/mach-spear13xx/Makefile
> +++ b/arch/arm/mach-spear13xx/Makefile
> @@ -7,6 +7,7 @@ obj-y += spear13xx.o clock.o
> obj-$(CONFIG_SMP) += platsmp.o headsmp.o
> obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
> obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
> +obj-$(CONFIG_PCIEPORTBUS) += pcie.o
>
> # spear1300 specific files
> obj-$(CONFIG_MACH_SPEAR1300) += spear1300.o
> diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
> index fd8c2dc..6169d4f 100644
> --- a/arch/arm/mach-spear13xx/include/mach/hardware.h
> +++ b/arch/arm/mach-spear13xx/include/mach/hardware.h
> @@ -28,4 +28,8 @@
> /* typesafe io address */
> #define __io_address(n) __io(IO_ADDRESS(n))
>
> +#define PCIBIOS_MIN_IO 0
> +#define PCIBIOS_MIN_MEM 0
> +#define pcibios_assign_all_busses() 0
> +
> #endif /* __MACH_HARDWARE_H */
> diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
> index c4f0c9d..59bf61a 100644
> --- a/arch/arm/mach-spear13xx/include/mach/irqs.h
> +++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
> @@ -130,7 +130,24 @@
> #define SPEAR_GPIO1_INT_BASE (SPEAR_GPIO0_INT_BASE + 8)
> #define SPEAR_GPIO_INT_END (SPEAR_GPIO1_INT_BASE + 8)
>
> -#define VIRQ_END SPEAR_GPIO_INT_END
> +/* PCIE MSI virtual irqs */
> +#define SPEAR_NUM_MSI_IRQS 64
> +#define SPEAR_MSI0_INT_BASE (SPEAR_GPIO_INT_END + 0)
> +#define SPEAR_MSI0_INT_END (SPEAR_MSI0_INT_BASE + SPEAR_NUM_MSI_IRQS)
> +#define SPEAR_MSI1_INT_BASE (SPEAR_MSI0_INT_END + 0)
> +#define SPEAR_MSI1_INT_END (SPEAR_MSI1_INT_BASE + SPEAR_NUM_MSI_IRQS)
> +#define SPEAR_MSI2_INT_BASE (SPEAR_MSI1_INT_END + 0)
> +#define SPEAR_MSI2_INT_END (SPEAR_MSI2_INT_BASE + SPEAR_NUM_MSI_IRQS)
> +
> +#define SPEAR_NUM_INTX_IRQS 4
> +#define SPEAR_INTX0_BASE (SPEAR_MSI2_INT_END + 0)
> +#define SPEAR_INTX0_END (SPEAR_INTX0_BASE + SPEAR_NUM_INTX_IRQS)
> +#define SPEAR_INTX1_BASE (SPEAR_INTX0_END + 0)
> +#define SPEAR_INTX1_END (SPEAR_INTX1_BASE + SPEAR_NUM_INTX_IRQS)
> +#define SPEAR_INTX2_BASE (SPEAR_INTX1_END + 0)
> +#define SPEAR_INTX2_END (SPEAR_INTX2_BASE + SPEAR_NUM_INTX_IRQS)
> +
> +#define VIRQ_END SPEAR_INTX2_END
> #define NR_IRQS VIRQ_END
>
> #endif /* __MACH_IRQS_H */
> diff --git a/arch/arm/mach-spear13xx/include/mach/pcie.h b/arch/arm/mach-spear13xx/include/mach/pcie.h
> new file mode 100644
> index 0000000..7ea10da
> --- /dev/null
> +++ b/arch/arm/mach-spear13xx/include/mach/pcie.h
> @@ -0,0 +1,176 @@
> +/*
> + * arch/arm/mach-spear13xx/include/mach/pcie.h
> + *
> + * Spear SoC PCIe handling.
> + *
> + * Copyright (C) 2010 ST Microelectronics
> + * Pratyush Anand<pratyush.anand at st.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2. This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#ifndef __MACH_PCIE_H
> +#define __MACH_PCIE_H
> +
> +extern int enable_pcie0_clk(void);
> +
> +struct pcie_port_info {
> + u8 is_host;
> + u8 is_gen1;
> +};
> +extern struct pcie_port_info *(*pcie_port_init)(int port);
> +
> +struct pcie_port {
3 other structs with the same name and similar fields already exist:
arch/arm/mach-kirkwood/pcie.c:27:struct pcie_port {
arch/arm/mach-dove/pcie.c:23:struct pcie_port {
arch/arm/mach-mv78xx0/pcie.c:19:struct pcie_port {
> + u8 port;
> + u8 root_bus_nr;
> + void __iomem *base;
> + void __iomem *app_base;
> + void __iomem *va_app_base;
> + void __iomem *va_dbi_base;
> + void __iomem *va_cfg0_base;
> + void __iomem *va_cfg1_base;
> + spinlock_t conf_lock;
> + char mem_space_name[16];
> + char io_space_name[16];
> + struct resource res[2];
> + struct pcie_port_info config;
> +};
> +
> +struct pcie_app_reg {
> + u32 app_ctrl_0; /*cr0*/
> + u32 app_ctrl_1; /*cr1*/
> + u32 app_status_0; /*cr2*/
> + u32 app_status_1; /*cr3*/
> + u32 msg_status; /*cr4*/
> + u32 msg_payload; /*cr5*/
> + u32 int_sts; /*cr6*/
> + u32 int_clr; /*cr7*/
> + u32 int_mask; /*cr8*/
> + u32 mst_bmisc; /*cr9*/
> + u32 phy_ctrl; /*cr10*/
> + u32 phy_status; /*cr11*/
> + u32 cxpl_debug_info_0; /*cr12*/
> + u32 cxpl_debug_info_1; /*cr13*/
> + u32 ven_msg_ctrl_0; /*cr14*/
> + u32 ven_msg_ctrl_1; /*cr15*/
> + u32 ven_msg_data_0; /*cr16*/
> + u32 ven_msg_data_1; /*cr17*/
> + u32 ven_msi_0; /*cr18*/
> + u32 ven_msi_1; /*cr19*/
> + u32 mst_rmisc; /*cr 20*/
> + u32 slv_awmisc; /*cr 21*/
> + u32 slv_armisc; /*cr 22*/
> + u32 pom0_mem_addr_start; /*cr23*/
> + u32 pom1_mem_addr_start; /*cr24*/
> + u32 pom_io_addr_start; /*cr25*/
> + u32 pom_cfg0_addr_start; /*cr26*/
> + u32 pom_cfg1_addr_start; /*cr27*/
> + u32 in0_mem_addr_start; /*cr28*/
> + u32 in1_mem_addr_start; /*cr29*/
> + u32 in_io_addr_start; /*cr30*/
> + u32 in_cfg0_addr_start; /*cr31*/
> + u32 in_cfg1_addr_start; /*cr32*/
> + u32 in_msg_addr_start; /*cr33*/
> + u32 in0_mem_addr_limit; /*cr34*/
> + u32 in1_mem_addr_limit; /*cr35*/
> + u32 in_io_addr_limit; /*cr36*/
> + u32 in_cfg0_addr_limit; /*cr37*/
> + u32 in_cfg1_addr_limit; /*cr38*/
> + u32 in_msg_addr_limit; /*cr39*/
> + u32 mem0_addr_offset_limit; /*cr40*/
> + u32 pim0_mem_addr_start; /*cr41*/
> + u32 pim1_mem_addr_start; /*cr42*/
> + u32 pim_io_addr_start; /*cr43*/
> + u32 pim_rom_addr_start; /*cr44*/
> +};
> +
> +/*CR0 ID*/
> +#define RX_LANE_FLIP_EN_ID 0
> +#define TX_LANE_FLIP_EN_ID 1
> +#define SYS_AUX_PWR_DET_ID 2
> +#define APP_LTSSM_ENABLE_ID 3
> +#define SYS_ATTEN_BUTTON_PRESSED_ID 4
> +#define SYS_MRL_SENSOR_STATE_ID 5
> +#define SYS_PWR_FAULT_DET_ID 6
> +#define SYS_MRL_SENSOR_CHGED_ID 7
> +#define SYS_PRE_DET_CHGED_ID 8
> +#define SYS_CMD_CPLED_INT_ID 9
> +#define APP_INIT_RST_0_ID 11
> +#define APP_REQ_ENTR_L1_ID 12
> +#define APP_READY_ENTR_L23_ID 13
> +#define APP_REQ_EXIT_L1_ID 14
> +#define DEVICE_TYPE_EP (0<< 25)
> +#define DEVICE_TYPE_LEP (1<< 25)
> +#define DEVICE_TYPE_RC (4<< 25)
> +#define SYS_INT_ID 29
> +#define MISCTRL_EN_ID 30
> +#define REG_TRANSLATION_ENABLE 31
> +
> +/*CR1 ID*/
> +#define APPS_PM_XMT_TURNOFF_ID 2
> +#define APPS_PM_XMT_PME_ID 5
> +
> +/*CR3 ID*/
> +#define XMLH_LTSSM_STATE_ID 0
> +#define XMLH_LTSSM_STATE_L0 ((u32)0x11<< XMLH_LTSSM_STATE_ID)
> +#define XMLH_LTSSM_STATE_MASK ((u32)0x1F<< XMLH_LTSSM_STATE_ID)
> +#define XMLH_LINK_UP_ID 5
> +
> +/*CR4 ID*/
> +#define CFG_MSI_EN_ID 18
> +
> +/*CR6*/
> +#define INTA_CTRL_INT (1<< 7)
> +#define INTB_CTRL_INT (1<< 8)
> +#define INTC_CTRL_INT (1<< 9)
> +#define INTD_CTRL_INT (1<< 10)
> +#define MSI_CTRL_INT (1<< 26)
> +
> +/*CR19 ID*/
> +#define VEN_MSI_REQ_ID 11
> +#define VEN_MSI_FUN_NUM_ID 8
> +#define VEN_MSI_TC_ID 5
> +#define VEN_MSI_VECTOR_ID 0
> +#define VEN_MSI_REQ_EN ((u32)0x1<< VEN_MSI_REQ_ID)
> +#define VEN_MSI_FUN_NUM_MASK ((u32)0x7<< VEN_MSI_FUN_NUM_ID)
> +#define VEN_MSI_TC_MASK ((u32)0x7<< VEN_MSI_TC_ID)
> +#define VEN_MSI_VECTOR_MASK ((u32)0x1F<< VEN_MSI_VECTOR_ID)
> +
> +/*CE21-22 ID*/
> +/*ID definition of ARMISC*/
> +#define AXI_OP_TYPE_ID 0
> +#define AXI_OP_BCM_ID 5
> +#define AXI_OP_EP_ID 6
> +#define AXI_OP_TD_ID 7
> +#define AXI_OP_ATTRIBUTE_ID 8
> +#define AXI_OP_TC_ID 10
> +#define AXI_OP_MSG_CODE_ID 13
> +#define AXI_OP_DBI_ACCESS_ID 21
> +#define AXI_OP_TYPE_MASK 0x1F
> +#define AXI_OP_TYPE_MEM_RDRW 0
> +#define AXI_OP_TYPE_MEM_RDRW_LOCKED 1
> +#define AXI_OP_TYPE_IO_RDRW 2
> +#define AXI_OP_TYPE_CONFIG_RDRW_TYPE0 4
> +#define AXI_OP_TYPE_CONFIG_RDRW_TYPE1 5
> +#define AXI_OP_TYPE_MSG_REQ 16
> +#define AXI_OP_TYPE_COMPLETION 10
> +#define AXI_OP_TYPE_COMPLETION_LOCKED 11
> +#define AXI_OP_TYPE_DBI_ELBI_ENABLE 1
> +
> +/* Synopsys specific PCIE configuration registers*/
> +#define PCIE_MSI_ADDR_LO 0x820 /* 32 bits */
> +#define PCIE_MSI_ADDR_HI 0x824 /* 32 bits */
> +#define PCIE_MSI_INTR0_ENABLE 0x828 /* 32 bits */
> +#define PCIE_MSI_INTR0_MASK 0x82C /* 32 bits */
> +#define PCIE_MSI_INTR0_STATUS 0x830 /* 32 bits */
> +
> +/*BAR MASK registers*/
> +#define PCIE_BAR0_MASK_REG 0x1010
> +
> +static inline void pcie_init(struct pcie_port_info * (*fptr)(int port))
> +{
> + pcie_port_init = fptr;
> +}
> +#endif
> diff --git a/arch/arm/mach-spear13xx/pcie.c b/arch/arm/mach-spear13xx/pcie.c
> new file mode 100644
> index 0000000..104dd03
> --- /dev/null
> +++ b/arch/arm/mach-spear13xx/pcie.c
> @@ -0,0 +1,1129 @@
> +/*
> + * arch/arm/mach-spear13xx/pcie.c
> + *
> + * PCIe functions for SPEAr 13xx SoCs
> + *
> + * Copyright (C) 2010 ST Microelectronics
> + * Pratyush Anand<pratyush.anand at st.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2. This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include<linux/clk.h>
> +#include<linux/delay.h>
> +#include<linux/kernel.h>
> +#include<linux/pci.h>
> +#include<linux/pci_regs.h>
> +#include<linux/msi.h>
> +#include<linux/mbus.h>
> +#include<linux/sched.h>
> +#include<asm/irq.h>
> +#include<asm/mach/irq.h>
> +#include<asm/mach/pci.h>
> +#include<mach/hardware.h>
> +#include<mach/misc_regs.h>
> +#include<mach/pcie.h>
> +
> +#define NUM_PCIE_PORTS 3
This could be made a variable, or the parts that depend on it could be
moved out of the common code.
> +
> +/* Sum of all these space can maximum be 256MB*/
> +#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
> +
> +/*
> + * In current implementation address translation is done using IN0 only. So IN1
> + * start address and IN0 end address has been kept same
> +*/
> +#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
> +#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
> +#define IN_CFG0_SIZE (1 * 1024 * 1024 - 1)
> +#define IN_CFG1_SIZE (1 * 1024 * 1024 - 1)
> +#define IN_MSG_SIZE (1 * 1024 * 1024 - 1)
> +
As could these.
> +#define MAX_LINK_UP_WAIT_MS 2
> +
> +/* Keeping 0-32GB of address range accessible for inbound transaction */
> +#define INBOUND_ADDR_MASK 0x7FFFFFFF
> +
> +struct pcie_port_info *(*pcie_port_init)(int port);
> +static struct pcie_port pcie_port[NUM_PCIE_PORTS];
> +static void __iomem *pcie_base[NUM_PCIE_PORTS] = {
> + IOMEM(SPEAR13XX_PCIE0_BASE),
> + IOMEM(SPEAR13XX_PCIE1_BASE),
> + IOMEM(SPEAR13XX_PCIE2_BASE),
> +};
> +static void __iomem *pcie_app_base[NUM_PCIE_PORTS] = {
> + IOMEM(SPEAR13XX_PCIE0_APP_BASE),
> + IOMEM(SPEAR13XX_PCIE1_APP_BASE),
> + IOMEM(SPEAR13XX_PCIE2_APP_BASE),
> +};
This could be part of pcie_port_info.
> +
> +static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
> +{
> + /* Enable DBI access */
> + writel(readl(&app_reg->slv_armisc) | (1<< AXI_OP_DBI_ACCESS_ID),
> + &app_reg->slv_armisc);
> + writel(readl(&app_reg->slv_awmisc) | (1<< AXI_OP_DBI_ACCESS_ID),
> + &app_reg->slv_awmisc);
> + wmb();
> +}
> +
> +static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
> +{
> + /* disable DBI access */
> + wmb();
> + writel(readl(&app_reg->slv_armisc)& ~(1<< AXI_OP_DBI_ACCESS_ID),
> + &app_reg->slv_armisc);
> + writel(readl(&app_reg->slv_awmisc)& ~(1<< AXI_OP_DBI_ACCESS_ID),
> + &app_reg->slv_awmisc);
> +}
> +
> +static void spear_dbi_read_reg(struct pcie_port *pp, int where, int size,
> + u32 *val)
> +{
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + u32 va_address;
> +
> + /* Enable DBI access */
> + enable_dbi_access(app_reg);
> +
> + va_address = (u32)pp->va_dbi_base + (where& ~0x3);
> +
> + *val = readl(va_address);
> +
> + if (size == 1)
> + *val = (*val>> (8 * (where& 3)))& 0xff;
> + else if (size == 2)
> + *val = (*val>> (8 * (where& 3)))& 0xffff;
> +
> + /* Disable DBI access */
> + disable_dbi_access(app_reg);
> +}
> +
> +static void spear_dbi_write_reg(struct pcie_port *pp, int where, int size,
> + u32 val)
> +{
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + u32 va_address;
> +
> + /* Enable DBI access */
> + enable_dbi_access(app_reg);
> +
> + va_address = (u32)pp->va_dbi_base + (where& ~0x3);
> +
> + if (size == 4)
> + writel(val, va_address);
> + else if (size == 2)
> + writew(val, va_address + (where& 2));
> + else if (size == 1)
> + writeb(val, va_address + (where& 3));
> +
> + /* Disable DBI access */
> + disable_dbi_access(app_reg);
> +}
> +
> +#define PCI_FIND_CAP_TTL 48
> +
> +static int pci_find_own_next_cap_ttl(struct pcie_port *pp,
> + u32 pos, int cap, int *ttl)
> +{
> + u32 id;
> +
> + while ((*ttl)--) {
> + spear_dbi_read_reg(pp, pos, 1,&pos);
> + if (pos< 0x40)
> + break;
> + pos&= ~3;
> + spear_dbi_read_reg(pp, pos + PCI_CAP_LIST_ID, 1,&id);
> + if (id == 0xff)
> + break;
> + if (id == cap)
> + return pos;
> + pos += PCI_CAP_LIST_NEXT;
> + }
> + return 0;
> +}
> +
> +static int pci_find_own_next_cap(struct pcie_port *pp, u32 pos, int cap)
> +{
> + int ttl = PCI_FIND_CAP_TTL;
> +
> + return pci_find_own_next_cap_ttl(pp, pos, cap,&ttl);
> +}
> +
> +static int pci_find_own_cap_start(struct pcie_port *pp, u8 hdr_type)
> +{
> + u32 status;
> +
> + spear_dbi_read_reg(pp, PCI_STATUS, 2,&status);
> + if (!(status& PCI_STATUS_CAP_LIST))
> + return 0;
> +
> + switch (hdr_type) {
> + case PCI_HEADER_TYPE_NORMAL:
> + case PCI_HEADER_TYPE_BRIDGE:
> + return PCI_CAPABILITY_LIST;
> + case PCI_HEADER_TYPE_CARDBUS:
> + return PCI_CB_CAPABILITY_LIST;
> + default:
> + return 0;
> + }
> +
> + return 0;
> +}
> +
> +/**
> + * Tell if a device supports a given PCI capability.
> + * Returns the address of the requested capability structure within the
> + * device's PCI configuration space or 0 in case the device does not
> + * support it. Possible values for @cap:
> + *
> + * %PCI_CAP_ID_PM Power Management
> + * %PCI_CAP_ID_AGP Accelerated Graphics Port
> + * %PCI_CAP_ID_VPD Vital Product Data
> + * %PCI_CAP_ID_SLOTID Slot Identification
> + * %PCI_CAP_ID_MSI Message Signalled Interrupts
> + * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
> + * %PCI_CAP_ID_PCIX PCI-X
> + * %PCI_CAP_ID_EXP PCI Express
> + */
> +static int pci_find_own_capability(struct pcie_port *pp, int cap)
> +{
> + u32 pos;
> + u32 hdr_type;
> +
> + spear_dbi_read_reg(pp, PCI_HEADER_TYPE, 1,&hdr_type);
> +
> + pos = pci_find_own_cap_start(pp, hdr_type);
> + if (pos)
> + pos = pci_find_own_next_cap(pp, pos, cap);
> +
> + return pos;
> +}
> +
> +static struct pcie_port *bus_to_port(int bus)
> +{
> + int i;
> +
> + for (i = NUM_PCIE_PORTS - 1; i>= 0; i--) {
> + int rbus = pcie_port[i].root_bus_nr;
> + if (!(pcie_port[i].config.is_host))
> + continue;
> + if (rbus != -1&& rbus<= bus)
> + break;
> + }
> +
> + return i>= 0 ? pcie_port + i : NULL;
> +}
> +
> +#ifdef CONFIG_PCI_MSI
> +static DECLARE_BITMAP(msi_irq_in_use[NUM_PCIE_PORTS], SPEAR_NUM_MSI_IRQS);
> +static unsigned int spear_msi_data[NUM_PCIE_PORTS];
> +
> +/* MSI int handler
> + */
> +static void handle_msi(struct pcie_port *pp)
> +{
> + unsigned long val;
> + int i, pos;
> +
> + for (i = 0; i< 8; i++) {
> + spear_dbi_read_reg(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
> + (u32 *)&val);
> + if (val) {
> + pos = 0;
> + while ((pos = find_next_bit(&val, 32, pos)) != 32) {
> + generic_handle_irq(SPEAR_MSI0_INT_BASE
> + + pp->port * SPEAR_NUM_MSI_IRQS
> + + (i * 32) + pos);
> + pos++;
> + }
> + }
> + spear_dbi_write_reg(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
> + }
> +}
> +
> +static int find_valid_pos0(int port, int nvec, int pos, int *pos0)
> +{
> + int flag = 1;
> + do {
> + pos = find_next_zero_bit(msi_irq_in_use[port],
> + SPEAR_NUM_MSI_IRQS, pos);
> + /*if you have reached to the end then get out from here.*/
> + if (pos == SPEAR_NUM_MSI_IRQS)
> + return -ENOSPC;
> + /* Check if this position is at correct offset.nvec is always a
> +	 * power of two. pos0 must be nvec bit aligned.
> + */
> + if (pos % nvec)
> + pos += nvec - (pos % nvec);
> + else
> + flag = 0;
> + } while (flag);
> +
> + *pos0 = pos;
> + return 0;
> +}
> +
> +static void spear13xx_msi_nop(unsigned int irq)
> +{
> + return;
> +}
> +
> +static struct irq_chip spear13xx_msi_chip = {
> + .name = "PCI-MSI",
> + .ack = spear13xx_msi_nop,
> + .irq_enable = unmask_msi_irq,
> + .irq_disable = mask_msi_irq,
> + .irq_mask = mask_msi_irq,
> + .irq_unmask = unmask_msi_irq,
> +};
> +
> +/*
> + * Dynamic irq allocate and deallocation
> + */
> +static int get_irq(int nvec, struct msi_desc *desc, int *pos)
> +{
> + int res, bit, irq, pos0, pos1, i;
> + u32 val;
> + struct pcie_port *pp = bus_to_port(desc->dev->bus->number);
> +
> + pos0 = find_first_zero_bit(msi_irq_in_use[pp->port],
> + SPEAR_NUM_MSI_IRQS);
> + if (pos0 % nvec) {
> + if (find_valid_pos0(pp->port, nvec, pos0,&pos0))
> + goto no_valid_irq;
> + }
> + if (nvec> 1) {
> + pos1 = find_next_bit(msi_irq_in_use[pp->port],
> + SPEAR_NUM_MSI_IRQS, pos0);
> + /* there must be nvec number of consecutive free bits */
> + while ((pos1 - pos0)< nvec) {
> + if (find_valid_pos0(pp->port, nvec, pos1,&pos0))
> + goto no_valid_irq;
> + pos1 = find_next_bit(msi_irq_in_use[pp->port],
> + SPEAR_NUM_MSI_IRQS, pos0);
> + }
> + }
> +
> + irq = (SPEAR_MSI0_INT_BASE + (pp->port * SPEAR_NUM_MSI_IRQS)) + pos0;
> +
> + if ((irq + nvec)> (SPEAR_MSI0_INT_END
> + + (pp->port * SPEAR_NUM_MSI_IRQS)))
> + goto no_valid_irq;
> +
> + i = 0;
> + while (i< nvec) {
> + set_bit(pos0 + i, msi_irq_in_use[pp->port]);
> + dynamic_irq_init(irq + i);
> + set_irq_msi(irq + i, desc);
> + set_irq_chip_and_handler(irq + i,&spear13xx_msi_chip,
> + handle_simple_irq);
> +
> + /* Enable corresponding interrupt on MSI interrupt
> + * controller.
> + */
> + res = ((pos0 + i) / 32) * 12;
> + bit = (pos0 + i) % 32;
> + spear_dbi_read_reg(pp, PCIE_MSI_INTR0_ENABLE + res, 4,&val);
> + val |= 1<< bit;
> + spear_dbi_write_reg(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
> +
> + i++;
> + }
> +
> + *pos = pos0;
> + return irq;
> +no_valid_irq:
> + *pos = pos0;
> + return -ENOSPC;
> +}
> +
> +static void clean_irq(unsigned int irq)
> +{
> + int res, bit, val, pos;
> + struct irq_desc *desc = irq_to_desc(irq);
> + struct pcie_port *pp = bus_to_port(desc->msi_desc->dev->bus->number);
> +
> + pos = irq - (SPEAR_MSI0_INT_BASE + (pp->port * SPEAR_NUM_MSI_IRQS));
> +
> + dynamic_irq_cleanup(irq);
> +
> + clear_bit(pos, msi_irq_in_use[pp->port]);
> +
> + /* Disable corresponding interrupt on MSI interrupt
> + * controller.
> + */
> + res = (pos / 32) * 12;
> + bit = pos % 32;
> + spear_dbi_read_reg(pp, PCIE_MSI_INTR0_ENABLE + res, 4,&val);
> + val&= ~(1<< bit);
> + spear_dbi_write_reg(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
> +
> +}
> +
> +int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
> +{
> + int cvec, rvec, irq, pos;
> + struct msi_msg msg;
> + uint16_t control;
> + struct pcie_port *pp = bus_to_port(pdev->bus->number);
> +
> + /*
> + * Read the MSI config to figure out how many IRQs this device
> + * wants.Most devices only want 1, which will give
> + * configured_private_bits and request_private_bits equal 0.
> + */
> + pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
> + &control);
> +
> + /*
> + * If the number of private bits has been configured then use
> + * that value instead of the requested number. This gives the
> + * driver the chance to override the number of interrupts
> + * before calling pci_enable_msi().
> + */
> +
> + cvec = (control& PCI_MSI_FLAGS_QSIZE)>> 4;
> +
> + if (cvec == 0) {
> + /* Nothing is configured, so use the hardware requested size */
> + rvec = (control& PCI_MSI_FLAGS_QMASK)>> 1;
> + } else {
> + /*
> + * Use the number of configured bits, assuming the
> + * driver wanted to override the hardware request
> + * value.
> + */
> + rvec = cvec;
> + }
> +
> + /*
> + * The PCI 2.3 spec mandates that there are at most 32
> + * interrupts. If this device asks for more, only give it one.
> + */
> + if (rvec> 5)
> + rvec = 0;
> +
> + irq = get_irq((1<< rvec), desc,&pos);
> +
> + if (irq< 0)
> + return irq;
> +
> + /* Update the number of IRQs the device has available to it */
> + control&= ~PCI_MSI_FLAGS_QSIZE;
> + control |= rvec<< 4;
> + pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
> + control);
> + desc->msi_attrib.multiple = rvec;
> +
> + /* An EP will modify lower 8 bits(max) of msi data while
> + * sending any msi interrupt
> + */
> + msg.address_hi = 0x0;
> + msg.address_lo = __virt_to_phys((u32)(&spear_msi_data[pp->port]));
> + msg.data = pos;
> + write_msi_msg(irq,&msg);
> +
> + return 0;
> +}
> +
> +void arch_teardown_msi_irq(unsigned int irq)
> +{
> + clean_irq(irq);
> +}
> +
> +static void spear13xx_msi_init(struct pcie_port *pp)
> +{
> + struct pcie_app_reg *app_reg = (struct pcie_app_reg *)pp->va_app_base;
> +
> + spear_dbi_write_reg(pp, PCIE_MSI_ADDR_LO, 4,
> + __virt_to_phys((u32)(&spear_msi_data[pp->port])));
> + spear_dbi_write_reg(pp, PCIE_MSI_ADDR_HI, 4, 0);
> +	/* Enable MSI interrupt */
> + writel(readl(&app_reg->int_mask) | MSI_CTRL_INT,
> + &app_reg->int_mask);
> +}
> +#endif
> +
> +static int spear13xx_pcie_link_up(void __iomem *va_app_base)
> +{
> + struct pcie_app_reg __iomem *app_reg = va_app_base;
> + int ucount = 0;
> +
> + do {
> + if (readl(&app_reg->app_status_1)&
> + ((u32)1<< XMLH_LINK_UP_ID))
> + return 1;
> + ucount++;
> + udelay(1);
> + } while (ucount<= MAX_LINK_UP_WAIT_MS * 1000);
> +
> + return 0;
> +}
> +
> +static void spear13xx_pcie_host_init(struct pcie_port *pp)
> +{
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + u32 cap, val;
> +
> + /*setup registers for outbound translation */
> +
> + writel(pp->base,&app_reg->in0_mem_addr_start);
> + writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
> + &app_reg->in0_mem_addr_limit);
> + writel(app_reg->in0_mem_addr_limit + 1,&app_reg->in1_mem_addr_start);
> + writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
> + &app_reg->in1_mem_addr_limit);
> + writel(app_reg->in1_mem_addr_limit + 1,&app_reg->in_io_addr_start);
> + writel(app_reg->in_io_addr_start + IN_IO_SIZE,
> + &app_reg->in_io_addr_limit);
> + writel(app_reg->in_io_addr_limit + 1,&app_reg->in_cfg0_addr_start);
> + writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
> + &app_reg->in_cfg0_addr_limit);
> + writel(app_reg->in_cfg0_addr_limit + 1,&app_reg->in_cfg1_addr_start);
> + writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
> + &app_reg->in_cfg1_addr_limit);
> + writel(app_reg->in_cfg1_addr_limit + 1,&app_reg->in_msg_addr_start);
> + writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
> + &app_reg->in_msg_addr_limit);
> +
> + writel(app_reg->in0_mem_addr_start,&app_reg->pom0_mem_addr_start);
> + writel(app_reg->in1_mem_addr_start,&app_reg->pom1_mem_addr_start);
> + writel(app_reg->in_io_addr_start,&app_reg->pom_io_addr_start);
> +
> + /*setup registers for inbound translation */
> +
> + writel((u32)INBOUND_ADDR_MASK + 1,&app_reg->mem0_addr_offset_limit);
> + writel(0,&app_reg->pim0_mem_addr_start);
> + writel(0,&app_reg->pim1_mem_addr_start);
> + spear_dbi_write_reg(pp, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
> + spear_dbi_write_reg(pp, PCI_BASE_ADDRESS_0, 4, 0);
> +
> + writel(0x0,&app_reg->pim_io_addr_start);
> + writel(0x0,&app_reg->pim_io_addr_start);
> + writel(0x0,&app_reg->pim_rom_addr_start);
> +
> + cap = pci_find_own_capability(pp, PCI_CAP_ID_EXP);
> + /*this controller support only 128 bytes read size, however its
> + * default value in capability register is 512 bytes. So force
> + * it to 128 here */
> +
> + spear_dbi_read_reg(pp, cap + PCI_EXP_DEVCTL, 4,&val);
> + val&= ~PCI_EXP_DEVCTL_READRQ;
> + spear_dbi_write_reg(pp, cap + PCI_EXP_DEVCTL, 4, val);
> +
> + /*program correct class for RC*/
> + spear_dbi_read_reg(pp, PCI_CLASS_REVISION, 4,&val);
> + val&= 0xFFFF;
> + val |= (PCI_CLASS_BRIDGE_PCI<< 16);
> + spear_dbi_write_reg(pp, PCI_CLASS_REVISION, 4, val);
> + /*program vid and did for RC*/
> + spear_dbi_write_reg(pp, PCI_VENDOR_ID, 2, 0x104A);
> + spear_dbi_write_reg(pp, PCI_DEVICE_ID, 2, 0xCD80);
> + /*if is_gen1 is set then handle it*/
> + if (pp->config.is_gen1) {
> + cap = pci_find_own_capability(pp, PCI_CAP_ID_EXP);
> + spear_dbi_read_reg(pp, cap + PCI_EXP_LNKCAP, 4,&val);
> + if ((val& 0xF) != 1) {
> + val&= ~((u32)0xF);
> + val |= 1;
> + spear_dbi_write_reg(pp, cap + PCI_EXP_LNKCAP, 4,
> + val);
> + }
> + spear_dbi_read_reg(pp, cap + PCI_EXP_LNKCTL2, 4,&val);
> + if ((val& 0xF) != 1) {
> + val&= ~((u32)0xF);
> + val |= 1;
> + spear_dbi_write_reg(pp, cap + PCI_EXP_LNKCTL2, 4,
> + val);
> + }
> + }
> +
> + writel(DEVICE_TYPE_RC | (1<< MISCTRL_EN_ID)
> + | (1<< APP_LTSSM_ENABLE_ID)
> + | ((u32)1<< REG_TRANSLATION_ENABLE),
> + &app_reg->app_ctrl_0);
> +}
> +
> +static void __init spear13xx_pcie_preinit(void)
> +{
> + int i;
> + struct pcie_port *pp;
> + struct pcie_app_reg __iomem *app_reg;
> +
> + for (i = 0; i< NUM_PCIE_PORTS; i++) {
> + pp = pcie_port + i;
> + app_reg = pp->va_app_base;
> +
> + if (!(pp->config.is_host))
> + continue;
> + snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
> + "PCIe %d MEM", pp->port);
> + pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
> + pp->res[0].name = pp->mem_space_name;
> + pp->res[0].start = readl(&app_reg->in0_mem_addr_start);
> + pp->res[0].end = readl(&app_reg->in0_mem_addr_limit);
> + pp->res[0].flags = IORESOURCE_MEM;
> +
> + snprintf(pp->io_space_name, sizeof(pp->io_space_name),
> + "PCIe %d I/O", pp->port);
> + pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
> + pp->res[1].name = pp->io_space_name;
> + pp->res[1].start = readl(&app_reg->in_io_addr_start);
> + pp->res[1].end = readl(&app_reg->in_io_addr_limit);
> + pp->res[1].flags = IORESOURCE_IO;
> +
> + if (request_resource(&iomem_resource,&pp->res[0]))
> + panic("can't allocate PCIe Mem space");
> + if (request_resource(&ioport_resource,&pp->res[1]))
> + panic("can't allocate PCIe IO space");
> + }
> +}
> +
> +static struct hw_pci spear13xx_pci;
> +
> +static int pcie_get_payload(struct pci_dev *dev)
> +{
> + int ret, cap;
> + u16 ctl;
> +
> + cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
> + if (!cap)
> + return -EINVAL;
> +
> + ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL,&ctl);
> + if (!ret)
> + ret = 128<< ((ctl& PCI_EXP_DEVCTL_PAYLOAD)>> 5);
> +
> + return ret;
> +}
> +
> +static int pcie_set_payload(struct pci_dev *dev, int rq)
> +{
> + int cap, err = -EINVAL;
> + u16 ctl, v;
> +
> + if (rq< 128 || rq> 4096 || !is_power_of_2(rq))
> + goto out;
> +
> + v = (ffs(rq) - 8)<< 5;
> +
> + cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
> + if (!cap)
> + goto out;
> +
> + err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL,&ctl);
> + if (err)
> + goto out;
> +
> + if ((ctl& PCI_EXP_DEVCTL_PAYLOAD) != v) {
> + ctl&= ~PCI_EXP_DEVCTL_PAYLOAD;
> + ctl |= v;
> + err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
> + }
> +
> +out:
> + return err;
> +}
> +
> +static void set_readrq(struct pci_bus *bus, int rq)
> +{
> + struct pci_dev *dev;
> +
> + list_for_each_entry(dev,&bus->devices, bus_list) {
> + if (rq< pcie_get_readrq(dev))
> + pcie_set_readrq(dev, rq);
> + if (dev->subordinate)
> + set_readrq(dev->subordinate, rq);
> + }
> +}
> +
> +static int get_max_payload(struct pci_bus *bus, int rq)
> +{
> + struct pci_dev *dev;
> + int payload;
> + int max_payload = rq;
> +
> + list_for_each_entry(dev,&bus->devices, bus_list) {
> + payload = pcie_get_payload(dev);
> + if (payload< max_payload)
> + max_payload = payload;
> + if (dev->subordinate)
> + max_payload = get_max_payload(dev->subordinate,
> + max_payload);
> + }
> + return max_payload;
> +}
> +
> +static void set_payload(struct pci_bus *bus, int rq)
> +{
> + struct pci_dev *dev;
> +
> + list_for_each_entry(dev,&bus->devices, bus_list) {
> + pcie_set_payload(dev, rq);
> + if (dev->subordinate)
> + set_payload(dev->subordinate, rq);
> + }
> +}
> +
> +static void __init spear13xx_pcie_postinit(void)
> +{
> + struct hw_pci *hw =&spear13xx_pci;
> + struct pci_sys_data *sys;
> + struct pci_bus *bus;
> + int cap, ctl, payload, readrq;
> + int max_payload = 4096;
> + struct pcie_port *pp;
> +
> +	/* align Max_Payload_Size for all devices to the minimum
> + * Max_Payload_Size of any of the device in tree.
> + * Max_Read_Request_Size of any of the DS device should be less
> + * than or equal to that of RC's Max_Read_Request_Size*/
> +
> + list_for_each_entry(sys,&hw->buses, node) {
> + bus = sys->bus;
> + pp = bus_to_port(bus->number);
> + cap = pci_find_own_capability(pp, PCI_CAP_ID_EXP);
> + spear_dbi_read_reg(pp, cap + PCI_EXP_DEVCTL, 2,&ctl);
> + payload = 128<< ((ctl& PCI_EXP_DEVCTL_PAYLOAD)>> 5);
> + if (payload< max_payload)
> + max_payload = payload;
> + readrq = 128<< ((ctl& PCI_EXP_DEVCTL_READRQ)>> 12);
> + max_payload = get_max_payload(bus, max_payload);
> + set_payload(bus, max_payload);
> + set_readrq(bus, readrq);
> + }
> +}
> +
> +static int __init spear13xx_pcie_setup(int nr, struct pci_sys_data *sys)
> +{
> + struct pcie_port *pp;
> + /*u32 val = 0;*/
> +
> + if (nr>= NUM_PCIE_PORTS)
> + return 0;
> +
> + pp =&pcie_port[nr];
> + if (!(pp->config.is_host))
> + return 0;
> +
> + pp->root_bus_nr = sys->busnr;
> +
> + sys->resource[0] =&pp->res[0];
> + sys->resource[1] =&pp->res[1];
> + sys->resource[2] = NULL;
> +
> + return 1;
> +}
> +
> +static int pcie_valid_config(struct pcie_port *pp, struct pci_bus *bus, int dev)
> +{
> + /*If there is no link, then there is no device*/
> + if (bus->number != pp->root_bus_nr) {
> + if (!spear13xx_pcie_link_up(pp->va_app_base))
> + return 0;
> + }
> + /*
> + * Don't go out when trying to access nonexisting devices
> + * on the local bus.
> + * we have only one slot on each root port.
> + */
> + if (bus->number == pp->root_bus_nr&& dev> 0)
> + return 0;
> +
> + /*do not read more than one device on the bus directly attached
> + * to RC's (Virtual Bridge's) DS side*/
> + if (bus->primary == pp->root_bus_nr&& dev> 0)
> + return 0;
> +
> + return 1;
> +}
> +
> +static int spear13xx_pcie_rd_conf(struct pcie_port *pp, struct pci_bus *bus,
> + u32 devfn, int where, int size, u32 *val)
> +{
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + u32 address;
> + u32 armisc;
> +
> + armisc = readl(&app_reg->slv_armisc);
> + armisc&= ~(AXI_OP_TYPE_MASK);
> + if (bus->parent->number == pp->root_bus_nr) {
> + address = (u32)pp->va_cfg0_base | (PCI_FUNC(devfn)<< 16)
> + | (where& 0xFFFC);
> + writel((bus->number<< 24) | (PCI_SLOT(devfn)<< 19),
> + &app_reg->pom_cfg0_addr_start);
> + armisc |= AXI_OP_TYPE_CONFIG_RDRW_TYPE0;
> + } else {
> + address = (u32)pp->va_cfg1_base | (PCI_FUNC(devfn)<< 16)
> + | (where& 0xFFFC);
> + writel((bus->number<< 24) | (PCI_SLOT(devfn)<< 19),
> + &app_reg->pom_cfg1_addr_start);
> + armisc |= AXI_OP_TYPE_CONFIG_RDRW_TYPE1;
> + }
> + writel(armisc,&app_reg->slv_armisc);
> + while (armisc != readl(&app_reg->slv_armisc))
> + ;
> + *val = readl(address);
> + if (size == 1)
> + *val = (*val>> (8 * (where& 3)))& 0xff;
> + else if (size == 2)
> + *val = (*val>> (8 * (where& 3)))& 0xffff;
> +
> + armisc&= ~(AXI_OP_TYPE_MASK);
> + writel(armisc,&app_reg->slv_armisc);
> +
> + return PCIBIOS_SUCCESSFUL;
> +}
> +
> +static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
> + int size, u32 *val)
> +{
> + struct pcie_port *pp = bus_to_port(bus->number);
> + unsigned long flags;
> + int ret;
> +
> + if (pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
> + *val = 0xffffffff;
> + return PCIBIOS_DEVICE_NOT_FOUND;
> + }
> +
> + spin_lock_irqsave(&pp->conf_lock, flags);
> + if (bus->number != pp->root_bus_nr)
> + ret = spear13xx_pcie_rd_conf(pp, bus, devfn, where, size, val);
> + else {
> + spear_dbi_read_reg(pp, where, size, val);
> + ret = 0;
> + }
> + spin_unlock_irqrestore(&pp->conf_lock, flags);
> +
> + return ret;
> +}
> +
> +static int spear13xx_pcie_wr_conf(struct pcie_port *pp, struct pci_bus *bus,
> + u32 devfn, int where, int size, u32 val)
> +{
> + int ret = PCIBIOS_SUCCESSFUL;
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + u32 address;
> + u32 awmisc;
> +
> + awmisc = readl(&app_reg->slv_awmisc);
> + awmisc&= ~(AXI_OP_TYPE_MASK);
> +
> + if (bus->parent->number == pp->root_bus_nr) {
> + address = (u32)pp->va_cfg0_base | (PCI_FUNC(devfn)<< 16)
> + | (where& 0xFFFC);
> + writel((bus->number<< 24) | (PCI_SLOT(devfn)<< 19),
> + &app_reg->pom_cfg0_addr_start);
> + awmisc |= AXI_OP_TYPE_CONFIG_RDRW_TYPE0;
> + } else {
> + address = (u32)pp->va_cfg1_base | (PCI_FUNC(devfn)<< 16)
> + | (where& 0xFFFC);
> + writel((bus->number<< 24) | (PCI_SLOT(devfn)<< 19),
> + &app_reg->pom_cfg1_addr_start);
> + awmisc |= AXI_OP_TYPE_CONFIG_RDRW_TYPE1;
> + }
> + writel(awmisc,&app_reg->slv_awmisc);
> + while (awmisc != readl(&app_reg->slv_awmisc))
> + ;
> + if (size == 4)
> + writel(val, address);
> + else if (size == 2)
> + writew(val, address + (where& 2));
> + else if (size == 1)
> + writeb(val, address + (where& 3));
> + else
> + ret = PCIBIOS_BAD_REGISTER_NUMBER;
> +
> + awmisc&= ~(AXI_OP_TYPE_MASK);
> + writel(awmisc,&app_reg->slv_awmisc);
> + return ret;
> +}
> +
> +static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
> + int where, int size, u32 val)
> +{
> + struct pcie_port *pp = bus_to_port(bus->number);
> + unsigned long flags;
> + int ret;
> +
> + if (pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
> + return PCIBIOS_DEVICE_NOT_FOUND;
> +
> + spin_lock_irqsave(&pp->conf_lock, flags);
> + if (bus->number != pp->root_bus_nr)
> + ret = spear13xx_pcie_wr_conf(pp, bus, devfn, where, size, val);
> + else {
> + spear_dbi_write_reg(pp, where, size, val);
> + ret = 0;
> + }
> + spin_unlock_irqrestore(&pp->conf_lock, flags);
> +
> + return ret;
> +}
> +
/* Config-space accessors handed to the PCI core for all root ports. */
static struct pci_ops pcie_ops = {
	.read = pcie_rd_conf,
	.write = pcie_wr_conf,
};
> +
> +static struct pci_bus __init *
> +spear13xx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
> +{
> + struct pci_bus *bus;
> +
> + if ((nr< NUM_PCIE_PORTS)&& pcie_port[nr].config.is_host) {
> + bus = pci_scan_bus(sys->busnr,&pcie_ops, sys);
> + } else {
> + bus = NULL;
> + BUG();
> + }
> +
> + return bus;
> +}
> +
> +static int __init spear13xx_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
> +{
> + struct pcie_port *pp = bus_to_port(dev->bus->number);
> + int irq = (SPEAR_INTX0_BASE + pp->port * SPEAR_NUM_INTX_IRQS + pin - 1);
> +
> + return irq;
> +}
> +
/* ARM PCI glue: one controller per PCIe port, standard INTx swizzle. */
static struct hw_pci spear13xx_pci __initdata = {
	.nr_controllers = NUM_PCIE_PORTS,
	.preinit = spear13xx_pcie_preinit,
	.postinit = spear13xx_pcie_postinit,
	.swizzle = pci_std_swizzle,
	.setup = spear13xx_pcie_setup,
	.scan = spear13xx_pcie_scan_bus,
	.map_irq = spear13xx_pcie_map_irq,
};
> +
> +void mask_intx_irq(unsigned int irq)
> +{
> + int irq_offset = (irq - SPEAR_INTX0_BASE) % SPEAR_NUM_INTX_IRQS;
> + int port = (irq - SPEAR_INTX0_BASE) / SPEAR_NUM_INTX_IRQS;
> + struct pcie_port *pp =&pcie_port[port];
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> +
> + switch (irq_offset) {
> + case 0:
> + writel(readl(&app_reg->int_mask)& ~INTA_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 1:
> + writel(readl(&app_reg->int_mask)& ~INTB_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 2:
> + writel(readl(&app_reg->int_mask)& ~INTC_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 3:
> + writel(readl(&app_reg->int_mask)& ~INTD_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + }
> +}
> +
> +void unmask_intx_irq(unsigned int irq)
> +{
> + int irq_offset = (irq - SPEAR_INTX0_BASE) % SPEAR_NUM_INTX_IRQS;
> + int port = (irq - SPEAR_INTX0_BASE) / SPEAR_NUM_INTX_IRQS;
> + struct pcie_port *pp =&pcie_port[port];
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> +
> + switch (irq_offset) {
> + case 0:
> + writel(readl(&app_reg->int_mask) | INTA_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 1:
> + writel(readl(&app_reg->int_mask) | INTB_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 2:
> + writel(readl(&app_reg->int_mask) | INTC_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + case 3:
> + writel(readl(&app_reg->int_mask) | INTD_CTRL_INT,
> + &app_reg->int_mask);
> + break;
> + }
> +}
> +
/* irq_chip driving the legacy INTA-INTD lines of the PCIe ports. */
static struct irq_chip spear13xx_intx_chip = {
	.name = "PCI-INTX",
	.mask = mask_intx_irq,
	.unmask = unmask_intx_irq,
};
> +
> +static void spear_pcie_int_handler(unsigned int irq, struct irq_desc *desc)
> +{
> + struct pcie_port *pp =&pcie_port[irq - IRQ_PCIE0];
> + struct pcie_app_reg __iomem *app_reg = pp->va_app_base;
> + unsigned int status;
> +
> + status = readl(&app_reg->int_sts);
> +
> + desc->chip->ack(irq);
> +
> + if (status& MSI_CTRL_INT) {
> +#ifdef CONFIG_PCI_MSI
> + handle_msi(pp);
> +#endif
> + writel(MSI_CTRL_INT,&app_reg->int_clr);
> + } else if (status& INTA_CTRL_INT)
> + generic_handle_irq(SPEAR_INTX0_BASE
> + + pp->port * SPEAR_NUM_INTX_IRQS);
> + else if (status& INTB_CTRL_INT)
> + generic_handle_irq(SPEAR_INTX0_BASE
> + + pp->port * SPEAR_NUM_INTX_IRQS + 1);
> + else if (status& INTC_CTRL_INT)
> + generic_handle_irq(SPEAR_INTX0_BASE
> + + pp->port * SPEAR_NUM_INTX_IRQS + 2);
> + else if (status& INTD_CTRL_INT)
> + generic_handle_irq(SPEAR_INTX0_BASE
> + + pp->port * SPEAR_NUM_INTX_IRQS + 3);
> + else
> + writel(status,&app_reg->int_clr);
> +
> + desc->chip->unmask(irq);
> +}
> +
/*
 * Hook up the port's chained interrupt handler, bring up MSI support
 * when configured, enable the INTx sources at the application logic
 * and register the INTx irq_chip for this port's four virtual IRQs.
 */
static void spear13xx_int_init(struct pcie_port *pp)
{
	int i, irq;
	struct pcie_app_reg __iomem *app_reg;

	set_irq_chained_handler(IRQ_PCIE0 + pp->port, spear_pcie_int_handler);

#ifdef CONFIG_PCI_MSI
	spear13xx_msi_init(pp);
#endif
	/* Enable the INTA-INTD interrupt sources. */
	app_reg = pp->va_app_base;
	writel(readl(&app_reg->int_mask) | INTA_CTRL_INT
			| INTB_CTRL_INT | INTC_CTRL_INT
			| INTD_CTRL_INT, &app_reg->int_mask);

	/*
	 * Only the INTX chip is initialized here; the MSI chip is
	 * initialized dynamically.
	 */
	irq = (SPEAR_INTX0_BASE + pp->port * SPEAR_NUM_INTX_IRQS);
	for (i = 0; i < SPEAR_NUM_INTX_IRQS; i++) {
		set_irq_chip_and_handler(irq + i, &spear13xx_intx_chip,
				handle_simple_irq);
		set_irq_flags(irq + i, IRQF_VALID);
	}
}
> +
> +static void __init add_pcie_port(int port, void __iomem *base,
> + void __iomem *app_base)
> +{
> + struct pcie_port *pp =&pcie_port[port];
> + struct pcie_app_reg __iomem *app_reg;
> +
> + pp->port = port;
> + pp->root_bus_nr = -1;
> + pp->base = base;
> + pp->app_base = app_base;
> + pp->va_app_base = ioremap((ulong)app_base, 0x200);
> + if (!pp->va_app_base) {
> + pr_err("error with ioremap in function %s\n", __func__);
> + return;
> + }
> + pp->va_dbi_base = ioremap((ulong)base, 0x2000);
> + if (!pp->va_dbi_base) {
> + pr_err("error with ioremap in function %s\n", __func__);
> + return;
> + }
> + spin_lock_init(&pp->conf_lock);
> + memset(pp->res, 0, sizeof(pp->res));
> + pr_info("spear13xx PCIe port %d\n", port);
> + if (spear13xx_pcie_link_up(pp->va_app_base)) {
> + pr_info("link up in bios\n");
> + } else {
> + pr_info("link down in bios\n");
> + spear13xx_pcie_host_init(pp);
> + spear13xx_int_init(pp);
> + app_reg = pp->va_app_base;
> + pp->va_cfg0_base =
> + ioremap(readl(app_reg->in_cfg0_addr_start),
> + IN_CFG0_SIZE);
> + if (!pp->va_cfg0_base) {
> + pr_err("error with ioremap in function %s\n", __func__);
> + return;
> + }
> + pp->va_cfg1_base =
> + ioremap(readl(app_reg->in_cfg1_addr_start),
> + IN_CFG1_SIZE);
> + if (!pp->va_cfg1_base) {
> + pr_err("error with ioremap in function %s\n", __func__);
> + return;
> + }
> +
> + }
> +}
> +
> +static int __init spear13xx_pcie_init(void)
> +{
> + int port;
> + struct clk *clk;
> + struct pcie_port_info *config;
> +
> + for (port = 0; port< NUM_PCIE_PORTS; port++) {
> + /* do not enable clock if it is PCIE0. Ideally , all controller
> + * should have been independent from others with respect to
> + * clock. But PCIE1 and 2 depends on PCIE0.So PCIE0 clk
> + * is provided during board init.*/
> + if (port == 1) {
> + /* Ideally CFG Clock should have been also enabled
> + * here. But it is done currently during board
> + * init routne*/
> + clk = clk_get_sys("pcie1", NULL);
> + if (IS_ERR(clk)) {
> + pr_err("%s:couldn't get clk for pcie1\n",
> + __func__);
> + continue;
> + }
> + if (clk_enable(clk)) {
> + pr_err("%s:couldn't enable clk for pcie1\n",
> + __func__);
> + continue;
> + }
> + } else if (port == 2) {
> + /* Ideally CFG Clock should have been also enabled
> + * here. But it is done currently during board
> + * init routne*/
> + clk = clk_get_sys("pcie2", NULL);
> + if (IS_ERR(clk)) {
> + pr_err("%s:couldn't get clk for pcie2\n",
> + __func__);
> + continue;
> + }
> + if (clk_enable(clk)) {
> + pr_err("%s:couldn't enable clk for pcie2\n",
> + __func__);
> + continue;
> + }
> + }
> +
> + config = (*pcie_port_init)(port);
> + memcpy((void *)&pcie_port[port].config, (void *)config,
> + (sizeof(struct pcie_port_info)));
> +
> + if (pcie_port[port].config.is_host)
> + add_pcie_port(port, pcie_base[port],
> + pcie_app_base[port]);
> + }
> +
> + pci_common_init(&spear13xx_pci);
> + pr_info("pcie init successful\n");
> + return 0;
> +}
> +subsys_initcall(spear13xx_pcie_init);
> diff --git a/arch/arm/mach-spear13xx/spear1300_evb.c b/arch/arm/mach-spear13xx/spear1300_evb.c
> index cba0fee..82aa92d 100644
> --- a/arch/arm/mach-spear13xx/spear1300_evb.c
> +++ b/arch/arm/mach-spear13xx/spear1300_evb.c
> @@ -16,6 +16,7 @@
> #include<asm/mach-types.h>
> #include<mach/generic.h>
> #include<mach/hardware.h>
> +#include<mach/pcie.h>
>
> /* padmux devices to enable */
> static struct pmx_dev *pmx_devs[] = {
> @@ -45,6 +46,37 @@ static struct amba_device *amba_devs[] __initdata = {
> static struct platform_device *plat_devs[] __initdata = {
> };
>
> +#ifdef CONFIG_PCIEPORTBUS
/* Per-port role table: port 0 runs as a device, ports 1-2 as hosts. */
static struct pcie_port_info __initdata pcie_port_info[] = {
	/*pcie0 port info*/
	{
		.is_host = 0,
	}, {
		/*pcie1 port info*/
		.is_host = 1,
	}, {
		/*pcie2 port info*/
		.is_host = 1,
	}
};
> +
> +/*
> + * This function is needed for PCIE host and device driver. Same controller can
> + * not be programmed as host as well as device. So host driver must call this
> + * function and if this function returns a configuration structure which tells
> + * that this port should be a host, then only host controller driver should add
> + * that particular port as RC. For a port to be added as device, one must also
> + * add device's information in plat_devs array defined in this file.
> + */
> +static struct pcie_port_info * __init spear1300_pcie_port_init(int port)
> +{
> + if (port< 3)
> + return&pcie_port_info[port];
> + else
> + return NULL;
> +}
> +#endif
> +
> static void __init spear1300_evb_init(void)
> {
> unsigned int i;
> @@ -52,6 +84,12 @@ static void __init spear1300_evb_init(void)
> /* call spear1300 machine init function */
> spear1300_init(NULL, pmx_devs, ARRAY_SIZE(pmx_devs));
>
> +#ifdef CONFIG_PCIEPORTBUS
> + /* Enable PCIE0 clk */
> + enable_pcie0_clk();
> + pcie_init(spear1300_pcie_port_init);
> +#endif
> +
> /* Add Platform Devices */
> platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
>
> diff --git a/arch/arm/mach-spear13xx/spear1310_evb.c b/arch/arm/mach-spear13xx/spear1310_evb.c
> index 62af911..3d7d1c5 100644
> --- a/arch/arm/mach-spear13xx/spear1310_evb.c
> +++ b/arch/arm/mach-spear13xx/spear1310_evb.c
> @@ -16,6 +16,7 @@
> #include<asm/mach-types.h>
> #include<mach/generic.h>
> #include<mach/hardware.h>
> +#include<mach/pcie.h>
>
> /* padmux devices to enable */
> static struct pmx_dev *pmx_devs[] = {
> @@ -64,6 +65,37 @@ static struct platform_device *plat_devs[] __initdata = {
> &spear1310_can1_device,
> };
>
> +#ifdef CONFIG_PCIEPORTBUS
/* Per-port role table: port 0 runs as a device, ports 1-2 as hosts. */
static struct pcie_port_info __initdata pcie_port_info[] = {
	/*pcie0 port info*/
	{
		.is_host = 0,
	}, {
		/*pcie1 port info*/
		.is_host = 1,
	}, {
		/*pcie2 port info*/
		.is_host = 1,
	}
};
> +
> +/*
> + * This function is needed for PCIE host and device driver. Same controller can
> + * not be programmed as host as well as device. So host driver must call this
> + * function and if this function returns a configuration structure which tells
> + * that this port should be a host, then only host controller driver should add
> + * that particular port as RC. For a port to be added as device, one must also
> + * add device's information in plat_devs array defined in this file.
> + */
> +static struct pcie_port_info * __init spear1310_pcie_port_init(int port)
> +{
> + if (port< 3)
> + return&pcie_port_info[port];
> + else
> + return NULL;
> +}
> +#endif
> +
> static void __init spear1310_evb_init(void)
> {
> unsigned int i;
> @@ -71,6 +103,12 @@ static void __init spear1310_evb_init(void)
> /* call spear1310 machine init function */
> spear1310_init(NULL, pmx_devs, ARRAY_SIZE(pmx_devs));
>
> +#ifdef CONFIG_PCIEPORTBUS
> + /* Enable PCIE0 clk */
> + enable_pcie0_clk();
> + pcie_init(spear1310_pcie_port_init);
> +#endif
> +
> /* Add Platform Devices */
> platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
>
> diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
> index 35582a6..6c2525a 100644
> --- a/arch/arm/mach-spear13xx/spear13xx.c
> +++ b/arch/arm/mach-spear13xx/spear13xx.c
> @@ -76,6 +76,34 @@ struct amba_device spear13xx_uart_device = {
> .irq = {IRQ_UART, NO_IRQ},
> };
>
> +#ifdef CONFIG_PCIEPORTBUS
> +/* PCIE0 clock always needs to be enabled if any of the three PCIE port
> + * have to be used. So call this function from the board initilization
> + * file. Ideally , all controller should have been independent from
> + * others with respect to clock.
> + */
> +int enable_pcie0_clk(void)
> +{
> + struct clk *clk;
> + /*Enable all CLK in CFG registers here only. Idealy only PCIE0
> + * should have been enabled. But Controler does not work
> + * properly if PCIE1 and PCIE2's CFG CLK is enabled in stages.
> + */
> + writel(PCIE0_CFG_VAL | PCIE1_CFG_VAL | PCIE2_CFG_VAL, PCIE_CFG);
> + clk = clk_get_sys("pcie0", NULL);
> + if (IS_ERR(clk)) {
> + pr_err("%s:couldn't get clk for pcie0\n", __func__);
> + return -ENODEV;
> + }
> + if (clk_enable(clk)) {
> + pr_err("%s:couldn't enable clk for pcie0\n", __func__);
> + return -ENODEV;
> + }
> +
> + return 0;
> +}
> +#endif
> +
> /* Do spear13xx familiy common initialization part here */
> void __init spear13xx_init(void)
> {
> diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
> index 29a25d2..ee5fd4a 100644
> --- a/arch/arm/plat-spear/Kconfig
> +++ b/arch/arm/plat-spear/Kconfig
> @@ -12,6 +12,8 @@ config ARCH_SPEAR13XX
> bool "SPEAr13XX"
> select ARM_GIC
> select CPU_V7
> + select ARCH_SUPPORTS_MSI
> + select MIGHT_HAVE_PCI
> help
> Supports for ARM's SPEAR13XX family
>
More information about the linux-arm-kernel
mailing list