[LEDE-DEV] [PATCH v2] ipq806x: Add nand boot support for ipq40xx AP-DK04.1-C1

Ram Chandra Jangir rjangir at codeaurora.org
Thu May 11 09:29:02 PDT 2017


This change adds NAND boot support for the IPQ40xx based
AP-DK04.1-C1 board using a UBI image, adds sysupgrade
support for AP-DK04.1-C1, and generates a sysupgrade.tar
image.

Testing:
*Tested on IPQ40xx AP-DK04.1-C1 and IPQ806x AP148 boards:
  a. NAND boot
  b. ubi sysupgrade

Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
---

Changes since v1:
* added ipq40xx nand pinmux support.
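
For reference, a rough sketch of how the ubi sysupgrade path can be
exercised; the output path, board IP and image file name below are
illustrative and depend on the local build configuration:

    scp bin/targets/ipq806x/*/*ap-dk04*sysupgrade.tar \
        root@192.168.1.1:/tmp/sysupgrade.tar
    ssh root@192.168.1.1 sysupgrade -v /tmp/sysupgrade.tar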

 .../ipq806x/base-files/lib/upgrade/platform.sh     |    1 +
 target/linux/ipq806x/image/Makefile                |    1 +
 ...nd-Add-bam_dma-support-in-qcom_nand-drive.patch |  382 ++++++
 ...nd-Added-bam-transaction-and-support-addi.patch | 1273 ++++++++++++++++++++
 ...gine-qcom-bam_dma-Add-custom-data-mapping.patch |  217 ++++
 ...ts-ipq4019-add-nand-and-qpic-bam-dma-node.patch |  121 ++
 ...-Add-support-to-configure-ipq40xx-GPIO_PU.patch |  261 ++++
 7 files changed, 2256 insertions(+)
 create mode 100644 target/linux/ipq806x/patches-4.9/859-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch
 create mode 100644 target/linux/ipq806x/patches-4.9/860-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch
 create mode 100644 target/linux/ipq806x/patches-4.9/861-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
 create mode 100644 target/linux/ipq806x/patches-4.9/862-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch
 create mode 100644 target/linux/ipq806x/patches-4.9/863-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch

diff --git a/target/linux/ipq806x/base-files/lib/upgrade/platform.sh b/target/linux/ipq806x/base-files/lib/upgrade/platform.sh
index 8970285..fd08db3 100644
--- a/target/linux/ipq806x/base-files/lib/upgrade/platform.sh
+++ b/target/linux/ipq806x/base-files/lib/upgrade/platform.sh
@@ -12,6 +12,7 @@ platform_pre_upgrade() {
 
 	case "$board" in
 	ap148 |\
+	ap-dk04.1-c1 |\
 	d7800 |\
 	nbg6817 |\
 	r7500 |\
diff --git a/target/linux/ipq806x/image/Makefile b/target/linux/ipq806x/image/Makefile
index 6ebcde5..c49a73e 100644
--- a/target/linux/ipq806x/image/Makefile
+++ b/target/linux/ipq806x/image/Makefile
@@ -264,6 +264,7 @@ endef
 
 define Device/AP-DK04.1-C1
 	$(call Device/FitImage)
+	$(call Device/UbiFit)
 	DEVICE_DTS := qcom-ipq4019-ap.dk04.1-c1
 	KERNEL_LOADADDR := 0x80208000
 	KERNEL_INSTALL := 1
diff --git a/target/linux/ipq806x/patches-4.9/859-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch b/target/linux/ipq806x/patches-4.9/859-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch
new file mode 100644
index 0000000..ad9d1bb
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/859-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch
@@ -0,0 +1,382 @@
+From 074036f9de6b8c5fc642e8e2540950f6a35aa804 Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir <rjangir@codeaurora.org>
+Date: Thu, 20 Apr 2017 10:31:10 +0530
+Subject: [PATCH] qcom: mtd: nand: Add bam_dma support in qcom_nand driver
+
+The current driver only supports ADM DMA, so this patch adds
+BAM DMA support to the NAND driver under the new compatible
+string qcom,ebi2-nandc-bam.
+It also adds BAM channels and data buffers: NAND BAM uses three
+channels (command, data tx and data rx), while ADM uses a single
+channel. The BAM channels are described in the device tree and
+requested in the NAND driver allocation function.
+
+Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
+---
+ .../devicetree/bindings/mtd/qcom_nandc.txt         |  69 +++++++--
+ drivers/mtd/nand/qcom_nandc.c                      | 160 +++++++++++++++++----
+ 2 files changed, 190 insertions(+), 39 deletions(-)
+
+diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
+index 70dd511..9e5c9be 100644
+--- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
++++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
+@@ -1,21 +1,26 @@
+ * Qualcomm NAND controller
+
+ Required properties:
+-- compatible:		should be "qcom,ipq806x-nand"
++- compatible:		"qcom,ipq806x-nand" for IPQ8064 which uses
++			ADM DMA.
+			"qcom,ebi2-nandc-bam" for NAND controllers
+			using BAM DMA, such as on IPQ4019.
+ - reg:			MMIO address range
+ - clocks:		must contain core clock and always on clock
+ - clock-names:		must contain "core" for the core clock and "aon" for the
+			always on clock
+ - dmas:			DMA specifier, consisting of a phandle to the ADM DMA
+-			controller node and the channel number to be used for
+-			NAND. Refer to dma.txt and qcom_adm.txt for more details
+-- dma-names:		must be "rxtx"
+-- qcom,cmd-crci:	must contain the ADM command type CRCI block instance
+-			number specified for the NAND controller on the given
+-			platform
+-- qcom,data-crci:	must contain the ADM data type CRCI block instance
+-			number specified for the NAND controller on the given
+-			platform
++			or BAM DMA controller node and the channel number to
++			be used for NAND. Refer to dma.txt, qcom_adm.txt(ADM)
++			and qcom_bam_dma.txt(BAM) for more details
++- dma-names:		"rxtx" - ADM
++			"tx", "rx", "cmd" - BAM
++- qcom,cmd-crci:	Only required for ADM DMA. must contain the ADM command
++			type CRCI block instance number specified for the NAND
++			controller on the given platform.
++- qcom,data-crci:	Only required for ADM DMA. must contain the ADM data
++			type CRCI block instance number specified for the NAND
++			controller on the given platform.
+ - #address-cells:	<1> - subnodes give the chip-select number
+ - #size-cells:		<0>
+
+@@ -44,7 +49,7 @@ partition.txt for more detail.
+ Example:
+
+ nand@1ac00000 {
+-	compatible = "qcom,ebi2-nandc";
++	compatible = "qcom,ipq806x-nand","qcom.qcom_nand";
+	reg = <0x1ac00000 0x800>;
+
+	clocks = <&gcc EBI2_CLK>,
+@@ -84,3 +89,45 @@ nand at 1ac00000 {
+		};
+	};
+ };
++
++nand@79B0000 {
++	compatible = "qcom,ebi2-nandc-bam";
++	reg = <0x79B0000 0x1000>;
++
++	clocks = <&gcc EBI2_CLK>,
++		 <&gcc EBI2_AON_CLK>;
++	clock-names = "core", "aon";
++
++	dmas = <&qpicbam 0>,
++		<&qpicbam 1>,
++		<&qpicbam 2>;
++	dma-names = "tx", "rx", "cmd";
++
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	nandcs@0 {
++		compatible = "qcom,nandcs";
++		reg = <0>;
++
++		nand-ecc-strength = <4>;
++		nand-ecc-step-size = <512>;
++		nand-bus-width = <8>;
++
++		partitions {
++			compatible = "fixed-partitions";
++			#address-cells = <1>;
++			#size-cells = <1>;
++
++			partition@0 {
++				label = "boot-nand";
++				reg = <0 0x58a0000>;
++			};
++
++			partition@58a0000 {
++				label = "fs-nand";
++				reg = <0x58a0000 0x4000000>;
++			};
++		};
++	};
++};
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index 57d483a..76a0ffc 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -226,6 +226,7 @@ struct nandc_regs {
+  *				by upper layers directly
+  * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
+  * @reg_read_buf:		local buffer for reading back registers via DMA
++ * @reg_read_buf_phys:         contains dma address for register read buffer
+  * @reg_read_pos:		marker for data read in reg_read_buf
+  *
+  * @regs:			a contiguous chunk of memory for DMA register
+@@ -234,7 +235,10 @@ struct nandc_regs {
+  * @cmd1/vld:			some fixed controller register values
+  * @ecc_modes:			supported ECC modes by the current controller,
+  *				initialized via DT match data
+- */
++ * @bch_enabled:		flag to tell whether BCH or RS ECC mode is used
++ * @dma_bam_enabled:		flag to tell whether nand controller is using
++ *				bam dma
++*/
+ struct qcom_nand_controller {
+	struct nand_hw_control controller;
+	struct list_head host_list;
+@@ -247,17 +251,28 @@ struct qcom_nand_controller {
+	struct clk *core_clk;
+	struct clk *aon_clk;
+
+-	struct dma_chan *chan;
+-	unsigned int cmd_crci;
+-	unsigned int data_crci;
+	struct list_head desc_list;
++	union {
++		struct {
++			struct dma_chan *tx_chan;
++			struct dma_chan *rx_chan;
++			struct dma_chan *cmd_chan;
++		};
++		struct {
++			struct dma_chan *chan;
++			unsigned int cmd_crci;
++			unsigned int data_crci;
++		};
++	};
+
+	u8		*data_buffer;
++	bool            dma_bam_enabled;
+	int		buf_size;
+	int		buf_count;
+	int		buf_start;
+
+	__le32 *reg_read_buf;
++	dma_addr_t reg_read_buf_phys;
+	int reg_read_pos;
+
+	struct nandc_regs *regs;
+@@ -316,6 +331,17 @@ struct qcom_nand_host {
+	u32 clrreadstatus;
+ };
+
++/*
++ * This data type corresponds to the nand driver data which will be used at
++ * driver probe time
++ * @ecc_modes - ecc mode for nand
++ * @dma_bam_enabled - whether this driver is using bam
++ */
++struct qcom_nand_driver_data {
++	u32 ecc_modes;
++	bool dma_bam_enabled;
++};
++
+ static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+	return container_of(chip, struct qcom_nand_host, chip);
+@@ -1893,7 +1919,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
+				| wide_bus << WIDE_FLASH
+				| 1 << DEV0_CFG1_ECC_DISABLE;
+
+-	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
++	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+				| 0 << ECC_SW_RESET
+				| host->cw_data << ECC_NUM_DATA_BYTES
+				| 1 << ECC_FORCE_CLK_OPEN
+@@ -1942,16 +1968,46 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+	if (!nandc->regs)
+		return -ENOMEM;
+
+-	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
+-				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
+-				GFP_KERNEL);
+-	if (!nandc->reg_read_buf)
+-		return -ENOMEM;
++	if (!nandc->dma_bam_enabled) {
++		nandc->reg_read_buf = devm_kzalloc(nandc->dev,
++					MAX_REG_RD *
++					sizeof(*nandc->reg_read_buf),
++					GFP_KERNEL);
+
+-	nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
+-	if (!nandc->chan) {
+-		dev_err(nandc->dev, "failed to request slave channel\n");
+-		return -ENODEV;
++		if (!nandc->reg_read_buf)
++			return -ENOMEM;
++
++		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
++		if (!nandc->chan) {
++			dev_err(nandc->dev, "failed to request slave channel\n");
++			return -ENODEV;
++		}
++	} else {
++		nandc->reg_read_buf = dmam_alloc_coherent(nandc->dev,
++					MAX_REG_RD *
++					sizeof(*nandc->reg_read_buf),
++					&nandc->reg_read_buf_phys, GFP_KERNEL);
++
++		if (!nandc->reg_read_buf)
++			return -ENOMEM;
++
++		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
++		if (!nandc->tx_chan) {
++			dev_err(nandc->dev, "failed to request tx channel\n");
++			return -ENODEV;
++		}
++
++		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
++		if (!nandc->rx_chan) {
++			dev_err(nandc->dev, "failed to request rx channel\n");
++			return -ENODEV;
++		}
++
++		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
++		if (!nandc->cmd_chan) {
++			dev_err(nandc->dev, "failed to request cmd channel\n");
++			return -ENODEV;
++		}
+	}
+
+	INIT_LIST_HEAD(&nandc->desc_list);
+@@ -1964,8 +2020,35 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+
+ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ {
+-	dma_release_channel(nandc->chan);
+-}
++	if (nandc->dma_bam_enabled) {
++		if (nandc->tx_chan)
++			dma_release_channel(nandc->tx_chan);
++
++		if (nandc->rx_chan)
++			dma_release_channel(nandc->rx_chan);
++
++		if (nandc->cmd_chan)
++			dma_release_channel(nandc->cmd_chan);
++
++		if (nandc->reg_read_buf)
++			dmam_free_coherent(nandc->dev, MAX_REG_RD *
++				sizeof(*nandc->reg_read_buf),
++				nandc->reg_read_buf,
++				nandc->reg_read_buf_phys);
++	} else {
++		if (nandc->chan)
++			dma_release_channel(nandc->chan);
++
++		if (nandc->reg_read_buf)
++			devm_kfree(nandc->dev, nandc->reg_read_buf);
++	}
++
++	if (nandc->regs)
++		devm_kfree(nandc->dev, nandc->regs);
++
++	if (nandc->data_buffer)
++		devm_kfree(nandc->dev, nandc->data_buffer);
++ }
+
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+@@ -2002,6 +2085,8 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
+	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = dev;
++	mtd->priv = chip;
++	chip->priv = nandc;
+
+	chip->cmdfunc		= qcom_nandc_command;
+	chip->select_chip	= qcom_nandc_select_chip;
+@@ -2049,16 +2134,20 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
+	struct device_node *np = nandc->dev->of_node;
+	int ret;
+
+-	ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
+-	if (ret) {
+-		dev_err(nandc->dev, "command CRCI unspecified\n");
+-		return ret;
+-	}
++	if (!nandc->dma_bam_enabled) {
++		ret = of_property_read_u32(np, "qcom,cmd-crci",
++				&nandc->cmd_crci);
++		if (ret) {
++			dev_err(nandc->dev, "command CRCI unspecified\n");
++			return ret;
++		}
+
+-	ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
+-	if (ret) {
+-		dev_err(nandc->dev, "data CRCI unspecified\n");
+-		return ret;
++		ret = of_property_read_u32(np, "qcom,data-crci",
++				&nandc->data_crci);
++		if (ret) {
++			dev_err(nandc->dev, "data CRCI unspecified\n");
++			return ret;
++		}
+	}
+
+	return 0;
+@@ -2073,6 +2162,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+	struct device_node *dn = dev->of_node, *child;
+	struct resource *res;
+	int ret;
++	struct qcom_nand_driver_data *driver_data;
+
+	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+	if (!nandc)
+@@ -2087,7 +2177,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+		return -ENODEV;
+	}
+
+-	nandc->ecc_modes = (unsigned long)dev_data;
++	driver_data = (struct qcom_nand_driver_data *)dev_data;
++
++	nandc->ecc_modes = driver_data->ecc_modes;
++	nandc->dma_bam_enabled = driver_data->dma_bam_enabled;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nandc->base = devm_ioremap_resource(dev, res);
+@@ -2179,7 +2272,15 @@ static int qcom_nandc_remove(struct platform_device *pdev)
+	return 0;
+ }
+
+-#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)
++struct qcom_nand_driver_data ebi2_nandc_bam_data = {
++	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
++	.dma_bam_enabled = true,
++};
++
++struct qcom_nand_driver_data ebi2_nandc_data = {
++	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
++	.dma_bam_enabled = false,
++};
+
+ /*
+  * data will hold a struct pointer containing more differences once we support
+@@ -2187,7 +2288,10 @@ static int qcom_nandc_remove(struct platform_device *pdev)
+  */
+ static const struct of_device_id qcom_nandc_of_match[] = {
+	{	.compatible = "qcom,ipq806x-nand",
+-		.data = (void *)EBI2_NANDC_ECC_MODES,
++		.data = (void *) &ebi2_nandc_data,
++	},
++	{	.compatible = "qcom,ebi2-nandc-bam",
++		.data = (void *) &ebi2_nandc_bam_data,
+	},
+	{}
+ };
+--
+2.7.2
diff --git a/target/linux/ipq806x/patches-4.9/860-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch b/target/linux/ipq806x/patches-4.9/860-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch
new file mode 100644
index 0000000..674de77
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/860-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch
@@ -0,0 +1,1273 @@
+From 645c7805f2602569263d7ac78050b2c9e91e3377 Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir <rjangir@codeaurora.org>
+Date: Thu, 20 Apr 2017 10:23:00 +0530
+Subject: [PATCH] qcom: mtd: nand: Added bam transaction and support
+ additional CSRs
+
+This patch adds the following for NAND BAM DMA support
+ - Bam transaction which will be used for any NAND request.
+    It contains the array of command elements, command and
+    data sgl. This transaction will be reset before every
+    request.
+ - Allocation function for NAND BAM transaction which will be
+    called only once at probe time.
+ - Reset function for NAND BAM transaction which will be called
+    before any new NAND request.
+ - Add support for additional CSRs.
+    NAND_READ_LOCATION - page offset for reading in BAM DMA mode
+    NAND_ERASED_CW_DETECT_CFG - status for erased code words
+    NAND_BUFFER_STATUS - status for ECC
+
+Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
+Signed-off-by: Ram Chandra Jangir <rjangir@codeaurora.org>
+---
+ drivers/mtd/nand/qcom_nandc.c    | 631 +++++++++++++++++++++++++++++++++++----
+ include/linux/dma/qcom_bam_dma.h | 149 +++++++++
+ 2 files changed, 721 insertions(+), 59 deletions(-)
+ create mode 100644 include/linux/dma/qcom_bam_dma.h
+
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index 76a0ffc..9d941e3 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -22,6 +22,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/delay.h>
++#include <linux/dma/qcom_bam_dma.h>
+
+ /* NANDc reg offsets */
+ #define	NAND_FLASH_CMD			0x00
+@@ -53,6 +54,8 @@
+ #define	NAND_VERSION			0xf08
+ #define	NAND_READ_LOCATION_0		0xf20
+ #define	NAND_READ_LOCATION_1		0xf24
++#define	NAND_READ_LOCATION_2		0xf28
++#define	NAND_READ_LOCATION_3		0xf2c
+
+ /* dummy register offsets, used by write_reg_dma */
+ #define	NAND_DEV_CMD1_RESTORE		0xdead
+@@ -131,6 +134,11 @@
+ #define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
+ #define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
++/* NAND_READ_LOCATION_n bits */
++#define READ_LOCATION_OFFSET           0
++#define READ_LOCATION_SIZE             16
++#define READ_LOCATION_LAST             31
++
+ /* Version Mask */
+ #define	NAND_VERSION_MAJOR_MASK		0xf0000000
+ #define	NAND_VERSION_MAJOR_SHIFT	28
+@@ -148,6 +156,9 @@
+ #define	FETCH_ID			0xb
+ #define	RESET_DEVICE			0xd
+
++/* NAND_CTRL bits */
++#define        BAM_MODE_EN                     BIT(0)
++
+ /*
+  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+  * the driver calls the chunks 'step' or 'codeword' interchangeably
+@@ -169,12 +180,77 @@
+ #define	ECC_BCH_4BIT	BIT(2)
+ #define	ECC_BCH_8BIT	BIT(3)
+
++/* Flags used for BAM DMA desc preparation */
++/* Don't set the EOT in current tx sgl */
++#define DMA_DESC_FLAG_NO_EOT		(0x0001)
++/* Set the NWD flag in current sgl */
++#define DMA_DESC_FLAG_BAM_NWD		(0x0002)
++/* Close current sgl and start writing in another sgl */
++#define DMA_DESC_FLAG_BAM_NEXT_SGL	(0x0004)
++/*
++ * Erased codeword status is being used two times in single transfer so this
++ * flag will determine the current value of erased codeword status register
++ */
++#define DMA_DESC_ERASED_CW_SET		(0x0008)
++
++/* Returns the dma address for reg read buffer */
++#define REG_BUF_DMA_ADDR(chip, vaddr) \
++	((chip)->reg_read_buf_phys + \
++	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
++
++/* Returns the nand register physical address */
++#define NAND_REG_PHYS_ADDRESS(chip, addr) \
++	((chip)->base_dma + (addr))
++
++/* command element array size in bam transaction */
++#define BAM_CMD_ELEMENT_SIZE	(256)
++/* command sgl size in bam transaction */
++#define BAM_CMD_SGL_SIZE	(256)
++/* data sgl size in bam transaction */
++#define BAM_DATA_SGL_SIZE	(128)
++
++/*
++ * This data type corresponds to the BAM transaction which will be used for any
++ * nand request.
++ * @bam_ce - the array of bam command elements
++ * @cmd_sgl - sgl for nand bam command pipe
++ * @tx_sgl - sgl for nand bam consumer pipe
++ * @rx_sgl - sgl for nand bam producer pipe
++ * @bam_ce_index - the index in bam_ce which is available for next sgl request
++ * @pre_bam_ce_index - the index in bam_ce which marks the start position ce
++ *                     for current sgl. It will be used for size calculation
++ *                     for current sgl
++ * @cmd_sgl_cnt - no of entries in command sgl.
++ * @tx_sgl_cnt - no of entries in tx sgl.
++ * @rx_sgl_cnt - no of entries in rx sgl.
++ */
++struct bam_transaction {
++	struct bam_cmd_element bam_ce[BAM_CMD_ELEMENT_SIZE];
++	struct qcom_bam_sgl cmd_sgl[BAM_CMD_SGL_SIZE];
++	struct qcom_bam_sgl tx_sgl[BAM_DATA_SGL_SIZE];
++	struct qcom_bam_sgl rx_sgl[BAM_DATA_SGL_SIZE];
++	uint32_t bam_ce_index;
++	uint32_t pre_bam_ce_index;
++	uint32_t cmd_sgl_cnt;
++	uint32_t tx_sgl_cnt;
++	uint32_t rx_sgl_cnt;
++};
++
++/**
++ * This data type corresponds to the nand dma descriptor
++ * @list - list for desc_info
++ * @dir - DMA transfer direction
++ * @sgl - sgl which will be used for single sgl dma descriptor
++ * @dma_desc - low level dma engine descriptor
++ * @bam_desc_data - used for bam desc mappings
++ */
+ struct desc_info {
+	struct list_head node;
+
+	enum dma_data_direction dir;
+	struct scatterlist sgl;
+	struct dma_async_tx_descriptor *dma_desc;
++	struct qcom_bam_custom_data bam_desc_data;
+ };
+
+ /*
+@@ -202,6 +278,13 @@ struct nandc_regs {
+	__le32 orig_vld;
+
+	__le32 ecc_buf_cfg;
++	__le32 read_location0;
++	__le32 read_location1;
++	__le32 read_location2;
++	__le32 read_location3;
++
++	__le32 erased_cw_detect_cfg_clr;
++	__le32 erased_cw_detect_cfg_set;
+ };
+
+ /*
+@@ -217,6 +300,7 @@ struct nandc_regs {
+  * @aon_clk:			another controller clock
+  *
+  * @chan:			dma channel
++ * @bam_txn:                   contains the bam transaction address
+  * @cmd_crci:			ADM DMA CRCI for command flow control
+  * @data_crci:			ADM DMA CRCI for data flow control
+  * @desc_list:			DMA descriptor list (list of desc_infos)
+@@ -242,6 +326,7 @@ struct nandc_regs {
+ struct qcom_nand_controller {
+	struct nand_hw_control controller;
+	struct list_head host_list;
++	struct bam_transaction *bam_txn;
+
+	struct device *dev;
+
+@@ -342,6 +427,45 @@ struct qcom_nand_driver_data {
+	bool dma_bam_enabled;
+ };
+
++/* Allocates and Initializes the BAM transaction */
++struct bam_transaction *alloc_bam_transaction(
++	struct qcom_nand_controller *nandc)
++{
++	struct bam_transaction *bam_txn;
++
++	bam_txn = kzalloc(sizeof(*bam_txn), GFP_KERNEL);
++
++	if (!bam_txn)
++		return NULL;
++
++	bam_txn->bam_ce_index = 0;
++	bam_txn->pre_bam_ce_index = 0;
++	bam_txn->cmd_sgl_cnt = 0;
++	bam_txn->tx_sgl_cnt = 0;
++	bam_txn->rx_sgl_cnt = 0;
++
++	qcom_bam_sg_init_table(bam_txn->cmd_sgl, BAM_CMD_SGL_SIZE);
++	qcom_bam_sg_init_table(bam_txn->tx_sgl, BAM_DATA_SGL_SIZE);
++	qcom_bam_sg_init_table(bam_txn->rx_sgl, BAM_DATA_SGL_SIZE);
++
++	return bam_txn;
++}
++
++/* Clears the BAM transaction index */
++void clear_bam_transaction(struct qcom_nand_controller *nandc)
++{
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	if (!nandc->dma_bam_enabled)
++		return;
++
++	bam_txn->bam_ce_index = 0;
++	bam_txn->pre_bam_ce_index = 0;
++	bam_txn->cmd_sgl_cnt = 0;
++	bam_txn->tx_sgl_cnt = 0;
++	bam_txn->rx_sgl_cnt = 0;
++}
++
+ static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+	return container_of(chip, struct qcom_nand_host, chip);
+@@ -398,6 +522,16 @@ static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+		return &regs->orig_vld;
+	case NAND_EBI2_ECC_BUF_CFG:
+		return &regs->ecc_buf_cfg;
++	case NAND_BUFFER_STATUS:
++		return &regs->clrreadstatus;
++	case NAND_READ_LOCATION_0:
++		return &regs->read_location0;
++	case NAND_READ_LOCATION_1:
++		return &regs->read_location1;
++	case NAND_READ_LOCATION_2:
++		return &regs->read_location2;
++	case NAND_READ_LOCATION_3:
++		return &regs->read_location3;
+	default:
+		return NULL;
+	}
+@@ -439,7 +573,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+ {
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
++	u32 cmd, cfg0, cfg1, ecc_bch_cfg, read_location0;
+
+	if (read) {
+		if (host->use_ecc)
+@@ -456,12 +590,20 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+
+		cfg1 = host->cfg1;
+		ecc_bch_cfg = host->ecc_bch_cfg;
++		if (read)
++			read_location0 = (0 << READ_LOCATION_OFFSET) |
++				(host->cw_data << READ_LOCATION_SIZE) |
++				(1 << READ_LOCATION_LAST);
+	} else {
+		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+				(num_cw - 1) << CW_PER_PAGE;
+
+		cfg1 = host->cfg1_raw;
+		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++		if (read)
++			read_location0 = (0 << READ_LOCATION_OFFSET) |
++				(host->cw_size << READ_LOCATION_SIZE) |
++				(1 << READ_LOCATION_LAST);
+	}
+
+	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
+@@ -472,8 +614,104 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
++
++	if (read)
++		nandc_set_reg(nandc, NAND_READ_LOCATION_0, read_location0);
+ }
+
++/*
++ * Prepares the command descriptor for BAM DMA which will be used for NAND
++ * register read and write. The command descriptor requires the command
++ * to be formed in command element type so this function uses the command
++ * element from bam transaction ce array and fills the same with required
++ * data. A single SGL can contain multiple command elements so
++ * DMA_DESC_FLAG_BAM_NEXT_SGL will be used for starting the separate SGL
++ * after the current command element.
++ */
++static int prep_dma_desc_command(struct qcom_nand_controller *nandc, bool read,
++					int reg_off, const void *vaddr,
++					int size, unsigned int flags)
++{
++	int bam_ce_size;
++	int i;
++	struct bam_cmd_element *bam_ce_buffer;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_index];
++
++	/* fill the command desc */
++	for (i = 0; i < size; i++) {
++		if (read) {
++			qcom_prep_bam_ce(&bam_ce_buffer[i],
++				NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
++				BAM_READ_COMMAND,
++				REG_BUF_DMA_ADDR(nandc,
++					(unsigned int *)vaddr + i));
++		} else {
++			qcom_prep_bam_ce(&bam_ce_buffer[i],
++				NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i),
++				BAM_WRITE_COMMAND,
++				*((unsigned int *)vaddr + i));
++		}
++	}
++
++	/* use the separate sgl after this command */
++	if (flags & DMA_DESC_FLAG_BAM_NEXT_SGL) {
++		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->pre_bam_ce_index];
++		bam_txn->bam_ce_index += size;
++		bam_ce_size = (bam_txn->bam_ce_index -
++				bam_txn->pre_bam_ce_index) *
++				sizeof(struct bam_cmd_element);
++		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].sgl,
++				bam_ce_buffer,
++				bam_ce_size);
++		if (flags & DMA_DESC_FLAG_BAM_NWD)
++			bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
++				DESC_FLAG_NWD | DESC_FLAG_CMD;
++		else
++			bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags =
++				DESC_FLAG_CMD;
++
++		bam_txn->cmd_sgl_cnt++;
++		bam_txn->pre_bam_ce_index = bam_txn->bam_ce_index;
++	} else {
++		bam_txn->bam_ce_index += size;
++	}
++
++	return 0;
++}
++
++/*
++ * Prepares the data descriptor for BAM DMA which will be used for NAND
++ * data read and write.
++ */
++static int prep_dma_desc_data_bam(struct qcom_nand_controller *nandc, bool read,
++					int reg_off, const void *vaddr,
++					int size, unsigned int flags)
++{
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	if (read) {
++		sg_set_buf(&bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].sgl,
++				vaddr, size);
++		bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].dma_flags = 0;
++		bam_txn->rx_sgl_cnt++;
++	} else {
++		sg_set_buf(&bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].sgl,
++				vaddr, size);
++		if (flags & DMA_DESC_FLAG_NO_EOT)
++			bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = 0;
++		else
++			bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags =
++				DESC_FLAG_EOT;
++
++		bam_txn->tx_sgl_cnt++;
++	}
++
++	return 0;
++}
++
++/* Prepares the dma descriptor for the ADM dma engine */
+ static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
+			 int reg_off, const void *vaddr, int size,
+			 bool flow_control)
+@@ -552,7 +790,7 @@ static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
+  * @num_regs:		number of registers to read
+  */
+ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-			int num_regs)
++			int num_regs, unsigned int flags)
+ {
+	bool flow_control = false;
+	void *vaddr;
+@@ -561,10 +799,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+		flow_control = true;
+
+-	size = num_regs * sizeof(u32);
+	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+	nandc->reg_read_pos += num_regs;
+
++	if (nandc->dma_bam_enabled) {
++		size = num_regs;
++
++		return prep_dma_desc_command(nandc, true, first, vaddr, size,
++						flags);
++	}
++
++	size = num_regs * sizeof(u32);
++
+	return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ }
+
+@@ -576,7 +822,7 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+  * @num_regs:		number of registers to write
+  */
+ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+-			 int num_regs)
++			 int num_regs, unsigned int flags)
+ {
+	bool flow_control = false;
+	struct nandc_regs *regs = nandc->regs;
+@@ -588,12 +834,29 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+	if (first == NAND_FLASH_CMD)
+		flow_control = true;
+
++	if (first == NAND_ERASED_CW_DETECT_CFG) {
++		if (flags & DMA_DESC_ERASED_CW_SET)
++			vaddr = &regs->erased_cw_detect_cfg_set;
++		else
++			vaddr = &regs->erased_cw_detect_cfg_clr;
++	}
++
++	if (first == NAND_EXEC_CMD)
++		flags |= DMA_DESC_FLAG_BAM_NWD;
++
+	if (first == NAND_DEV_CMD1_RESTORE)
+		first = NAND_DEV_CMD1;
+
+	if (first == NAND_DEV_CMD_VLD_RESTORE)
+		first = NAND_DEV_CMD_VLD;
+
++	if (nandc->dma_bam_enabled) {
++		size = num_regs;
++
++		return prep_dma_desc_command(nandc, false, first, vaddr, size,
++						flags);
++	}
++
+	size = num_regs * sizeof(u32);
+
+	return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
+@@ -608,8 +871,12 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+  * @size:		DMA transaction size in bytes
+  */
+ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			 const u8 *vaddr, int size)
++			 const u8 *vaddr, int size, unsigned int flags)
+ {
++	if (nandc->dma_bam_enabled)
++		return prep_dma_desc_data_bam(nandc, true, reg_off, vaddr, size,
++						flags);
++
+	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ }
+
+@@ -622,8 +889,12 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+  * @size:		DMA transaction size in bytes
+  */
+ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			  const u8 *vaddr, int size)
++			  const u8 *vaddr, int size, unsigned int flags)
+ {
++	if (nandc->dma_bam_enabled)
++		return prep_dma_desc_data_bam(nandc, false, reg_off, vaddr,
++							size, flags);
++
+	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ }
+
+@@ -633,14 +904,57 @@ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+  */
+ static void config_cw_read(struct qcom_nand_controller *nandc)
+ {
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+-	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
++	write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
++	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
++	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++
++	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
++				DMA_DESC_ERASED_CW_SET);
++	if (nandc->dma_bam_enabled)
++		write_reg_dma(nandc, NAND_READ_LOCATION_0, 1,
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
++
+
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
+-	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD |
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
++
++	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
++}
++
++/*
++ * Helpers to prepare DMA descriptors for configuring registers
++ * before reading a NAND page with BAM.
++ */
++static void config_bam_page_read(struct qcom_nand_controller *nandc)
++{
++	write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
++	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
++	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
++				DMA_DESC_ERASED_CW_SET |
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
++}
++
++/*
++ * Helpers to prepare DMA descriptors for configuring registers
++ * before reading each codeword in NAND page with BAM.
++ */
++static void config_bam_cw_read(struct qcom_nand_controller *nandc)
++{
++	if (nandc->dma_bam_enabled)
++		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, 0);
++
++	write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++
++	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
+ }
+
+ /*
+@@ -649,19 +963,20 @@ static void config_cw_read(struct qcom_nand_controller *nandc)
+  */
+ static void config_cw_write_pre(struct qcom_nand_controller *nandc)
+ {
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+-	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
++	write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0);
++	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
++	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
+ }
+
+ static void config_cw_write_post(struct qcom_nand_controller *nandc)
+ {
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
++	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+-	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+-	write_reg_dma(nandc, NAND_READ_STATUS, 1);
++	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
++	write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+ }
+
+ /*
+@@ -675,6 +990,8 @@ static int nandc_param(struct qcom_nand_host *host)
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
++	clear_bam_transaction(nandc);
++
+	/*
+	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
+	 * in use. we configure the controller to perform a raw read of 512
+@@ -708,9 +1025,13 @@ static int nandc_param(struct qcom_nand_host *host)
+
+	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
++	nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++				(0 << READ_LOCATION_OFFSET) |
++				(512 << READ_LOCATION_SIZE) |
++				(1 << READ_LOCATION_LAST));
+
+-	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
+-	write_reg_dma(nandc, NAND_DEV_CMD1, 1);
++	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
++	write_reg_dma(nandc, NAND_DEV_CMD1, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+	nandc->buf_count = 512;
+	memset(nandc->data_buffer, 0xff, nandc->buf_count);
+@@ -718,11 +1039,12 @@ static int nandc_param(struct qcom_nand_host *host)
+	config_cw_read(nandc);
+
+	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+-		      nandc->buf_count);
++		      nandc->buf_count, 0);
+
+	/* restore CMD1 and VLD regs */
+-	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
+-	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
++	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
++	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1,
++				DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+	return 0;
+ }
+@@ -733,6 +1055,8 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
++	clear_bam_transaction(nandc);
++
+	nandc_set_reg(nandc, NAND_FLASH_CMD,
+		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+@@ -744,14 +1068,15 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
+	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+-	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
++	write_reg_dma(nandc, NAND_FLASH_CMD, 3, DMA_DESC_FLAG_BAM_NEXT_SGL);
++	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, DMA_DESC_FLAG_BAM_NEXT_SGL);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+-	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+-	write_reg_dma(nandc, NAND_READ_STATUS, 1);
++	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++
++	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
++	write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+	return 0;
+ }
+@@ -765,16 +1090,19 @@ static int read_id(struct qcom_nand_host *host, int column)
+	if (column == -1)
+		return 0;
+
++	clear_bam_transaction(nandc);
++
+	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+	nandc_set_reg(nandc, NAND_ADDR0, column);
+	nandc_set_reg(nandc, NAND_ADDR1, 0);
+-	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
++	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
++			nandc->dma_bam_enabled ? 0 : DM_EN);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
++	write_reg_dma(nandc, NAND_FLASH_CMD, 4, DMA_DESC_FLAG_BAM_NEXT_SGL);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+-	read_reg_dma(nandc, NAND_READ_ID, 1);
++	read_reg_dma(nandc, NAND_READ_ID, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
+
+	return 0;
+ }
+@@ -785,15 +1113,61 @@ static int reset(struct qcom_nand_host *host)
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
++	clear_bam_transaction(nandc);
++
+	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
++	write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++	write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++
++	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL);
++
++	return 0;
++}
++
++static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++				struct dma_chan *chan,
++				struct qcom_bam_sgl *bam_sgl,
++				int sgl_cnt,
++				enum dma_transfer_direction direction)
++{
++	struct desc_info *desc;
++	struct dma_async_tx_descriptor *dma_desc;
++
++	if (!qcom_bam_map_sg(nandc->dev, bam_sgl, sgl_cnt, direction)) {
++		dev_err(nandc->dev, "failure in mapping sgl\n");
++		return -ENOMEM;
++	}
++
++	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++	if (!desc) {
++		qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
++		return -ENOMEM;
++	}
++
++
++	desc->bam_desc_data.dir = direction;
++	desc->bam_desc_data.sgl_cnt = sgl_cnt;
++	desc->bam_desc_data.bam_sgl = bam_sgl;
++
++	dma_desc = dmaengine_prep_dma_custom_mapping(chan,
++			&desc->bam_desc_data,
++			0);
++
++	if (!dma_desc) {
++		dev_err(nandc->dev, "failure in cmd prep desc\n");
++		qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction);
++		kfree(desc);
++		return -EINVAL;
++	}
++
++	desc->dma_desc = dma_desc;
+
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
++	list_add_tail(&desc->node, &nandc->desc_list);
+
+	return 0;
++
+ }
+
+ /* helpers to submit/free our list of dma descriptors */
+@@ -801,12 +1175,46 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ {
+	struct desc_info *desc;
+	dma_cookie_t cookie = 0;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++	int r;
++
++	if (nandc->dma_bam_enabled) {
++		if (bam_txn->rx_sgl_cnt) {
++			r = prepare_bam_async_desc(nandc, nandc->rx_chan,
++				bam_txn->rx_sgl, bam_txn->rx_sgl_cnt,
++				DMA_DEV_TO_MEM);
++			if (r)
++				return r;
++		}
++
++		if (bam_txn->tx_sgl_cnt) {
++			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
++				bam_txn->tx_sgl, bam_txn->tx_sgl_cnt,
++				DMA_MEM_TO_DEV);
++			if (r)
++				return r;
++		}
++
++		r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
++			bam_txn->cmd_sgl, bam_txn->cmd_sgl_cnt,
++			DMA_MEM_TO_DEV);
++		if (r)
++			return r;
++	}
+
+	list_for_each_entry(desc, &nandc->desc_list, node)
+		cookie = dmaengine_submit(desc->dma_desc);
+
+-	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+-		return -ETIMEDOUT;
++	if (nandc->dma_bam_enabled) {
++		dma_async_issue_pending(nandc->tx_chan);
++		dma_async_issue_pending(nandc->rx_chan);
++
++		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
++			return -ETIMEDOUT;
++	} else {
++		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
++			return -ETIMEDOUT;
++	}
+
+	return 0;
+ }
+@@ -817,7 +1225,16 @@ static void free_descs(struct qcom_nand_controller *nandc)
+
+	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+		list_del(&desc->node);
+-		dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
++
++		if (nandc->dma_bam_enabled)
++			qcom_bam_unmap_sg(nandc->dev,
++				desc->bam_desc_data.bam_sgl,
++				desc->bam_desc_data.sgl_cnt,
++				desc->bam_desc_data.dir);
++		else
++			dma_unmap_sg(nandc->dev, &desc->sgl, 1,
++				desc->dir);
++
+		kfree(desc);
+	}
+ }
+@@ -1128,6 +1545,9 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int i, ret;
+
++	if (nandc->dma_bam_enabled)
++		config_bam_page_read(nandc);
++
+	/* queue cmd descs for each codeword */
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size, oob_size;
+@@ -1141,11 +1561,36 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+			oob_size = host->ecc_bytes_hw + host->spare_bytes;
+		}
+
+-		config_cw_read(nandc);
++		if (nandc->dma_bam_enabled) {
++			if (data_buf && oob_buf) {
++				nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++					(0 << READ_LOCATION_OFFSET) |
++					(data_size << READ_LOCATION_SIZE) |
++					(0 << READ_LOCATION_LAST));
++				nandc_set_reg(nandc, NAND_READ_LOCATION_1,
++					(data_size << READ_LOCATION_OFFSET) |
++					(oob_size << READ_LOCATION_SIZE) |
++					(1 << READ_LOCATION_LAST));
++			} else if (data_buf) {
++				nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++					(0 << READ_LOCATION_OFFSET) |
++					(data_size << READ_LOCATION_SIZE) |
++					(1 << READ_LOCATION_LAST));
++			} else {
++				nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++					(data_size << READ_LOCATION_OFFSET) |
++					(oob_size << READ_LOCATION_SIZE) |
++					(1 << READ_LOCATION_LAST));
++			}
++
++			config_bam_cw_read(nandc);
++		} else {
++			config_cw_read(nandc);
++		}
+
+		if (data_buf)
+			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+-				      data_size);
++				      data_size, 0);
+
+		/*
+		 * when ecc is enabled, the controller doesn't read the real
+@@ -1161,7 +1606,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+				*oob_buf++ = 0xff;
+
+			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-				      oob_buf, oob_size);
++				      oob_buf, oob_size, 0);
+		}
+
+		if (data_buf)
+@@ -1200,10 +1645,14 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
+
+	set_address(host, host->cw_size * (ecc->steps - 1), page);
+	update_rw_regs(host, 1, true);
++	nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++			(0 << READ_LOCATION_OFFSET) |
++			(size << READ_LOCATION_SIZE) |
++			(1 << READ_LOCATION_LAST));
+
+	config_cw_read(nandc);
+
+-	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
++	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+
+	ret = submit_descs(nandc);
+	if (ret)
+@@ -1226,6 +1675,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+	data_buf = buf;
+	oob_buf = oob_required ? chip->oob_poi : NULL;
+
++	clear_bam_transaction(nandc);
+	ret = read_page_ecc(host, data_buf, oob_buf);
+	if (ret) {
+		dev_err(nandc->dev, "failure to read page\n");
+@@ -1245,13 +1695,19 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
+	u8 *data_buf, *oob_buf;
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int i, ret;
++	int read_location;
+
+	data_buf = buf;
+	oob_buf = chip->oob_poi;
+
+	host->use_ecc = false;
++
++	clear_bam_transaction(nandc);
+	update_rw_regs(host, ecc->steps, true);
+
++	if (nandc->dma_bam_enabled)
++		config_bam_page_read(nandc);
++
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size1, data_size2, oob_size1, oob_size2;
+		int reg_off = FLASH_BUF_ACC;
+@@ -1269,21 +1725,49 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
+			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+		}
+
+-		config_cw_read(nandc);
++		if (nandc->dma_bam_enabled) {
++			read_location = 0;
++			nandc_set_reg(nandc, NAND_READ_LOCATION_0,
++				(read_location << READ_LOCATION_OFFSET) |
++				(data_size1 << READ_LOCATION_SIZE) |
++				(0 << READ_LOCATION_LAST));
++			read_location += data_size1;
++
++			nandc_set_reg(nandc, NAND_READ_LOCATION_1,
++				(read_location << READ_LOCATION_OFFSET) |
++				(oob_size1 << READ_LOCATION_SIZE) |
++				(0 << READ_LOCATION_LAST));
++			read_location += oob_size1;
++
++			nandc_set_reg(nandc, NAND_READ_LOCATION_2,
++				(read_location << READ_LOCATION_OFFSET) |
++				(data_size2 << READ_LOCATION_SIZE) |
++				(0 << READ_LOCATION_LAST));
++			read_location += data_size2;
++
++			nandc_set_reg(nandc, NAND_READ_LOCATION_3,
++				(read_location << READ_LOCATION_OFFSET) |
++				(oob_size2 << READ_LOCATION_SIZE) |
++				(1 << READ_LOCATION_LAST));
++
++			config_bam_cw_read(nandc);
++		} else {
++			config_cw_read(nandc);
++		}
+
+-		read_data_dma(nandc, reg_off, data_buf, data_size1);
++		read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+		reg_off += data_size1;
+		data_buf += data_size1;
+
+-		read_data_dma(nandc, reg_off, oob_buf, oob_size1);
++		read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+		reg_off += oob_size1;
+		oob_buf += oob_size1;
+
+-		read_data_dma(nandc, reg_off, data_buf, data_size2);
++		read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
+		reg_off += data_size2;
+		data_buf += data_size2;
+
+-		read_data_dma(nandc, reg_off, oob_buf, oob_size2);
++		read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+		oob_buf += oob_size2;
+	}
+
+@@ -1306,6 +1790,7 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+	int ret;
+
+	clear_read_regs(nandc);
++	clear_bam_transaction(nandc);
+
+	host->use_ecc = true;
+	set_address(host, 0, page);
+@@ -1329,6 +1814,7 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+	int i, ret;
+
+	clear_read_regs(nandc);
++	clear_bam_transaction(nandc);
+
+	data_buf = (u8 *)buf;
+	oob_buf = chip->oob_poi;
+@@ -1350,7 +1836,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+
+		config_cw_write_pre(nandc);
+
+-		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
++		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
++				i == (ecc->steps - 1) ? DMA_DESC_FLAG_NO_EOT : 0);
+
+		/*
+		 * when ECC is enabled, we don't really need to write anything
+@@ -1363,7 +1850,7 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			oob_buf += host->bbm_size;
+
+			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-				       oob_buf, oob_size);
++				       oob_buf, oob_size, 0);
+		}
+
+		config_cw_write_post(nandc);
+@@ -1393,6 +1880,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
+	int i, ret;
+
+	clear_read_regs(nandc);
++	clear_bam_transaction(nandc);
+
+	data_buf = (u8 *)buf;
+	oob_buf = chip->oob_poi;
+@@ -1419,19 +1907,22 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
+
+		config_cw_write_pre(nandc);
+
+-		write_data_dma(nandc, reg_off, data_buf, data_size1);
++		write_data_dma(nandc, reg_off, data_buf, data_size1,
++					DMA_DESC_FLAG_NO_EOT);
+		reg_off += data_size1;
+		data_buf += data_size1;
+
+-		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
++		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
++					DMA_DESC_FLAG_NO_EOT);
+		reg_off += oob_size1;
+		oob_buf += oob_size1;
+
+-		write_data_dma(nandc, reg_off, data_buf, data_size2);
++		write_data_dma(nandc, reg_off, data_buf, data_size2,
++					DMA_DESC_FLAG_NO_EOT);
+		reg_off += data_size2;
+		data_buf += data_size2;
+
+-		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
++		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+		oob_buf += oob_size2;
+
+		config_cw_write_post(nandc);
+@@ -1467,6 +1958,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+
+	host->use_ecc = true;
+
++	clear_bam_transaction(nandc);
+	ret = copy_last_cw(host, page);
+	if (ret)
+		return ret;
+@@ -1486,7 +1978,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+
+	config_cw_write_pre(nandc);
+	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+-		       data_size + oob_size);
++		       data_size + oob_size, 0);
+	config_cw_write_post(nandc);
+
+	ret = submit_descs(nandc);
+@@ -1524,6 +2016,7 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+	 */
+	host->use_ecc = false;
+
++	clear_bam_transaction(nandc);
+	ret = copy_last_cw(host, page);
+	if (ret)
+		goto err;
+@@ -1554,6 +2047,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+	int page, ret, status = 0;
+
+	clear_read_regs(nandc);
++	clear_bam_transaction(nandc);
+
+	/*
+	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
+@@ -1570,7 +2064,8 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+	update_rw_regs(host, 1, false);
+
+	config_cw_write_pre(nandc);
+-	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
++	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
++				host->cw_size, 0);
+	config_cw_write_post(nandc);
+
+	ret = submit_descs(nandc);
+@@ -1930,6 +2425,8 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
+
+	host->clrflashstatus = FS_READY_BSY_N;
+	host->clrreadstatus = 0xc0;
++	nandc->regs->erased_cw_detect_cfg_clr = CLR_ERASED_PAGE_DET;
++	nandc->regs->erased_cw_detect_cfg_set = SET_ERASED_PAGE_DET;
+
+	dev_dbg(nandc->dev,
+		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+@@ -2008,6 +2505,12 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+			dev_err(nandc->dev, "failed to request cmd channel\n");
+			return -ENODEV;
+		}
++
++		nandc->bam_txn = alloc_bam_transaction(nandc);
++		if (!nandc->bam_txn) {
++			dev_err(nandc->dev, "failed to allocate bam transaction\n");
++			return -ENOMEM;
++		}
+	}
+
+	INIT_LIST_HEAD(&nandc->desc_list);
+@@ -2043,6 +2546,9 @@ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+			devm_kfree(nandc->dev, nandc->reg_read_buf);
+	}
+
++	if (nandc->bam_txn)
++		kfree(nandc->bam_txn);
++
+	if (nandc->regs)
+		devm_kfree(nandc->dev, nandc->regs);
+
+@@ -2053,11 +2559,18 @@ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
++	u32 nand_ctrl;
++
+	/* kill onenand */
+	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+
+-	/* enable ADM DMA */
+-	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
++	/* enable ADM or BAM DMA */
++	if (!nandc->dma_bam_enabled) {
++		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
++	} else {
++		nand_ctrl = nandc_read(nandc, NAND_CTRL);
++		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
++	}
+
+	/* save the original values of these registers */
+	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
+new file mode 100644
+index 0000000..7e87a85
+--- /dev/null
++++ b/include/linux/dma/qcom_bam_dma.h
+@@ -0,0 +1,149 @@
++/*
++ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef _QCOM_BAM_DMA_H
++#define _QCOM_BAM_DMA_H
++
++#include <linux/dma-mapping.h>
++
++#define DESC_FLAG_INT BIT(15)
++#define DESC_FLAG_EOT BIT(14)
++#define DESC_FLAG_EOB BIT(13)
++#define DESC_FLAG_NWD BIT(12)
++#define DESC_FLAG_CMD BIT(11)
++
++/*
++ * QCOM BAM DMA SGL struct
++ *
++ * @sgl: DMA SGL
++ * @dma_flags: BAM DMA flags
++ */
++struct qcom_bam_sgl {
++	struct scatterlist sgl;
++	unsigned int dma_flags;
++};
++
++/*
++ * This data type corresponds to the native Command Element
++ * supported by BAM DMA Engine.
++ *
++ * @addr - register address.
++ * @command - command type.
++ * @data - for write command: content to be written into peripheral register.
++ *	 for read command: dest addr to write peripheral register value to.
++ * @mask - register mask.
++ * @reserved - for future usage.
++ *
++ */
++struct bam_cmd_element {
++	__le32 addr:24;
++	__le32 command:8;
++	__le32 data;
++	__le32 mask;
++	__le32 reserved;
++};
++
++/*
++ * This enum indicates the command type in a command element
++ */
++enum bam_command_type {
++	BAM_WRITE_COMMAND = 0,
++	BAM_READ_COMMAND,
++};
++
++/*
++ * qcom_bam_sg_init_table - Init QCOM BAM SGL
++ * @bam_sgl: bam sgl
++ * @nents: number of entries in bam sgl
++ *
++ * This function performs the initialization for each SGL in BAM SGL
++ * with generic SGL API.
++ */
++static inline void qcom_bam_sg_init_table(struct qcom_bam_sgl *bam_sgl,
++		unsigned int nents)
++{
++	int i;
++
++	for (i = 0; i < nents; i++)
++		sg_init_table(&bam_sgl[i].sgl, 1);
++}
++
++/*
++ * qcom_bam_unmap_sg - Unmap QCOM BAM SGL
++ * @dev: device for which unmapping needs to be done
++ * @bam_sgl: bam sgl
++ * @nents: number of entries in bam sgl
++ * @dir: dma transfer direction
++ *
++ * This function performs the DMA unmapping for each SGL in BAM SGL
++ * with generic SGL API.
++ */
++static inline void qcom_bam_unmap_sg(struct device *dev,
++	struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
++{
++	int i;
++
++	for (i = 0; i < nents; i++)
++		dma_unmap_sg(dev, &bam_sgl[i].sgl, 1, dir);
++}
++
++/*
++ * qcom_bam_map_sg - Map QCOM BAM SGL
++ * @dev: device for which mapping needs to be done
++ * @bam_sgl: bam sgl
++ * @nents: number of entries in bam sgl
++ * @dir: dma transfer direction
++ *
++ * This function performs the DMA mapping for each SGL in BAM SGL
++ * with generic SGL API.
++ *
++ * returns 0 on error and > 0 on success
++ */
++static inline int qcom_bam_map_sg(struct device *dev,
++	struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir)
++{
++	int i, ret = 0;
++
++	for (i = 0; i < nents; i++) {
++		ret = dma_map_sg(dev, &bam_sgl[i].sgl, 1, dir);
++		if (!ret)
++			break;
++	}
++
++	/* unmap the mapped sgl from previous loop in case of error */
++	if (!ret)
++		qcom_bam_unmap_sg(dev, bam_sgl, i, dir);
++
++	return ret;
++}
++
++/*
++ * qcom_prep_bam_ce - Wrapper function to prepare a single BAM command element
++ *	with the data that is passed to this function.
++ * @bam_ce: bam command element
++ * @addr: target address
++ * @command: command in bam_command_type
++ * @data: actual data for write and dest addr for read
++ */
++static inline void qcom_prep_bam_ce(struct bam_cmd_element *bam_ce,
++				uint32_t addr, uint32_t command, uint32_t data)
++{
++	bam_ce->addr = cpu_to_le32(addr);
++	bam_ce->command = cpu_to_le32(command);
++	bam_ce->data = cpu_to_le32(data);
++	bam_ce->mask = 0xFFFFFFFF;
++}
++#endif
+--
+2.7.2
diff --git a/target/linux/ipq806x/patches-4.9/861-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch b/target/linux/ipq806x/patches-4.9/861-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
new file mode 100644
index 0000000..3b26120
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/861-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch
@@ -0,0 +1,217 @@
+From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir <rjangir at codeaurora.org>
+Date: Thu, 20 Apr 2017 10:39:00 +0530
+Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping
+
+Add a new function to support preparing a DMA descriptor
+from custom data.
+
+Signed-off-by: Abhishek Sahu <absahu at codeaurora.org>
+Signed-off-by: Ram Chandra Jangir <rjangir at codeaurora.org>
+---
+ drivers/dma/qcom/bam_dma.c       | 97 +++++++++++++++++++++++++++++++++++++---
+ include/linux/dma/qcom_bam_dma.h | 14 ++++++
+ include/linux/dmaengine.h        | 14 ++++++
+ 3 files changed, 119 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index 03c4eb3..bde8d70 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -49,6 +49,7 @@
+ #include <linux/clk.h>
+ #include <linux/dmaengine.h>
+ #include <linux/pm_runtime.h>
++#include <linux/dma/qcom_bam_dma.h>
+
+ #include "../dmaengine.h"
+ #include "../virt-dma.h"
+@@ -61,11 +62,6 @@ struct bam_desc_hw {
+
+ #define BAM_DMA_AUTOSUSPEND_DELAY 100
+
+-#define DESC_FLAG_INT BIT(15)
+-#define DESC_FLAG_EOT BIT(14)
+-#define DESC_FLAG_EOB BIT(13)
+-#define DESC_FLAG_NWD BIT(12)
+-
+ struct bam_async_desc {
+	struct virt_dma_desc vd;
+
+@@ -670,6 +666,93 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+ }
+
+ /**
++ * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data
++ *
++ * @chan: dma channel
++ * @data: custom data
++ * @flags: DMA flags
++ */
++static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping(
++		struct dma_chan *chan,
++		void *data, unsigned long flags)
++{
++	struct bam_chan *bchan = to_bam_chan(chan);
++	struct bam_device *bdev = bchan->bdev;
++	struct bam_async_desc *async_desc;
++	struct qcom_bam_custom_data *desc_data = data;
++	u32 i;
++	struct bam_desc_hw *desc;
++	unsigned int num_alloc = 0;
++
++
++	if (!is_slave_direction(desc_data->dir)) {
++		dev_err(bdev->dev, "invalid dma direction\n");
++		return NULL;
++	}
++
++	/* calculate number of required entries */
++	for (i = 0; i < desc_data->sgl_cnt; i++)
++		num_alloc += DIV_ROUND_UP(
++			sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE);
++
++	/* allocate enough room to accommodate the number of entries */
++	async_desc = kzalloc(sizeof(*async_desc) +
++			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
++
++	if (!async_desc)
++		goto err_out;
++
++	if (flags & DMA_PREP_FENCE)
++		async_desc->flags |= DESC_FLAG_NWD;
++
++	if (flags & DMA_PREP_INTERRUPT)
++		async_desc->flags |= DESC_FLAG_EOT;
++	else
++		async_desc->flags |= DESC_FLAG_INT;
++
++	async_desc->num_desc = num_alloc;
++	async_desc->curr_desc = async_desc->desc;
++	async_desc->dir = desc_data->dir;
++
++	/* fill in temporary descriptors */
++	desc = async_desc->desc;
++	for (i = 0; i < desc_data->sgl_cnt; i++) {
++		unsigned int remainder;
++		unsigned int curr_offset = 0;
++
++		remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl);
++
++		do {
++			desc->addr = cpu_to_le32(
++				sg_dma_address(&desc_data->bam_sgl[i].sgl) +
++						 curr_offset);
++
++			if (desc_data->bam_sgl[i].dma_flags)
++				desc->flags |= cpu_to_le16(
++					desc_data->bam_sgl[i].dma_flags);
++
++			if (remainder > BAM_FIFO_SIZE) {
++				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
++				remainder -= BAM_FIFO_SIZE;
++				curr_offset += BAM_FIFO_SIZE;
++			} else {
++				desc->size = cpu_to_le16(remainder);
++				remainder = 0;
++			}
++
++			async_desc->length += desc->size;
++			desc++;
++		} while (remainder > 0);
++	}
++
++	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
++
++err_out:
++	kfree(async_desc);
++	return NULL;
++}
++
++/**
+  * bam_dma_terminate_all - terminate all transactions on a channel
+  * @bchan: bam dma channel
+  *
+@@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_chan *bchan)
+
+	/* set any special flags on the last descriptor */
+	if (async_desc->num_desc == async_desc->xfer_len)
+-		desc[async_desc->xfer_len - 1].flags =
++		desc[async_desc->xfer_len - 1].flags |=
+					cpu_to_le16(async_desc->flags);
+	else
+		desc[async_desc->xfer_len - 1].flags |=
+@@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform_device *pdev)
+	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+	bdev->common.device_free_chan_resources = bam_free_chan;
+	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
++	bdev->common.device_prep_dma_custom_mapping =
++		bam_prep_dma_custom_mapping;
+	bdev->common.device_config = bam_slave_config;
+	bdev->common.device_pause = bam_pause;
+	bdev->common.device_resume = bam_resume;
+diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
+index 7e87a85..7113c77 100644
+--- a/include/linux/dma/qcom_bam_dma.h
++++ b/include/linux/dma/qcom_bam_dma.h
+@@ -65,6 +65,19 @@ enum bam_command_type {
+ };
+
+ /*
++ * QCOM BAM DMA custom data
++ *
++ * @sgl_cnt: number of sgl in bam_sgl
++ * @dir: DMA data transfer direction
++ * @bam_sgl: BAM SGL pointer
++ */
++struct qcom_bam_custom_data {
++	u32 sgl_cnt;
++	enum dma_transfer_direction dir;
++	struct qcom_bam_sgl *bam_sgl;
++};
++
++/*
+  * qcom_bam_sg_init_table - Init QCOM BAM SGL
+  * @bam_sgl: bam sgl
+  * @nents: number of entries in bam sgl
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index cc535a4..627c125 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -692,6 +692,8 @@ struct dma_filter {
+  *	be called after period_len bytes have been transferred.
+  * @device_prep_interleaved_dma: Transfer expression in a generic way.
+  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
++ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
++ * 	specific custom data
+  * @device_config: Pushes a new configuration to a channel, return 0 or an error
+  *	code
+  * @device_pause: Pauses any transfer happening on a channel. Returns
+@@ -783,6 +785,9 @@ struct dma_device {
+	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+		struct dma_chan *chan, dma_addr_t dst, u64 data,
+		unsigned long flags);
++	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
++		struct dma_chan *chan, void *data,
++		unsigned long flags);
+
+	int (*device_config)(struct dma_chan *chan,
+			     struct dma_slave_config *config);
+@@ -899,6 +904,15 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+			src_sg, src_nents, flags);
+ }
+
++static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
++		struct dma_chan *chan,
++		void *data,
++		unsigned long flags)
++{
++	return chan->device->device_prep_dma_custom_mapping(chan, data,
++			flags);
++}
++
+ /**
+  * dmaengine_terminate_all() - Terminate all active DMA transfers
+  * @chan: The channel for which to terminate the transfers
+--
+2.7.2
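
As a usage illustration (not part of the patch), this is roughly how a client
driver could hand a pre-mapped BAM SGL to the new device_prep_dma_custom_mapping
hook through the dmaengine_prep_dma_custom_mapping() wrapper added above. The
function name and error code are illustrative; 'chan' is assumed to be a BAM
channel obtained with dma_request_chan(), and the entries in 'sgl' are assumed
to be DMA-mapped already.

static int example_submit_custom(struct dma_chan *chan,
				 struct qcom_bam_sgl *sgl, u32 cnt)
{
	struct qcom_bam_custom_data data = {
		.sgl_cnt = cnt,
		.dir = DMA_MEM_TO_DEV,
		.bam_sgl = sgl,
	};
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_custom_mapping(chan, &data,
					       DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}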
diff --git a/target/linux/ipq806x/patches-4.9/862-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch b/target/linux/ipq806x/patches-4.9/862-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch
new file mode 100644
index 0000000..6675be9
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/862-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch
@@ -0,0 +1,121 @@
+From 02bbf3c46e1e38e9ca699143566903683e3a015d Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir <rjangir at codeaurora.org>
+Date: Thu, 20 Apr 2017 10:45:00 +0530
+Subject: [PATCH] dts: ipq4019: add nand and qpic bam dma node
+
+This change adds the QPIC BAM DMA and NAND driver nodes to the
+IPQ4019 device tree, and also enables them for the AP-DK04.1-C1
+board.
+
+Signed-off-by: Ram Chandra Jangir <rjangir at codeaurora.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi | 39 +++++++++++++++++++++++++++
+ arch/arm/boot/dts/qcom-ipq4019.dtsi           | 38 ++++++++++++++++++++++++++
+ 2 files changed, 77 insertions(+)
+
+diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
+index 09fb047..91630dc 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
+@@ -101,6 +101,34 @@
+ 					bias-bus-hold;
+ 				};
+ 			};
++
++			nand_pins: nand_pins {
++				mux {
++					pins = "gpio52", "gpio53", "gpio54",
++						"gpio55", "gpio56", "gpio57",
++						"gpio58", "gpio59", "gpio60",
++						"gpio61", "gpio62", "gpio63",
++						"gpio64", "gpio65", "gpio66",
++						"gpio67", "gpio68", "gpio69";
++					function = "qpic_pad";
++					bias-disable;
++				};
++
++				pullups {
++					pins = "gpio52", "gpio53", "gpio58",
++						"gpio59";
++					bias-pull-up;
++				};
++
++				pulldowns {
++					pins = "gpio54", "gpio55", "gpio56",
++						"gpio57", "gpio60", "gpio61",
++						"gpio62", "gpio63", "gpio64",
++						"gpio65", "gpio66", "gpio67",
++						"gpio68", "gpio69";
++					bias-pull-down;
++				};
++			};
+ 		};
+ 
+ 		blsp_dma: dma at 7884000 {
+@@ -204,5 +233,15 @@
+ 		wifi at a800000 {
+ 			status = "ok";
+ 		};
++
++		qpic_bam: dma at 7984000 {
++			status = "ok";
++		};
++
++		nand: qpic-nand at 79b0000 {
++			pinctrl-0 = <&nand_pins>;
++			pinctrl-names = "default";
++			status = "ok";
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+index 52a64e7..740808b 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -593,5 +593,43 @@
+ 					  "legacy";
+ 			status = "disabled";
+ 		};
++
++		qpic_bam: dma at 7984000 {
++			compatible = "qcom,bam-v1.7.0";
++			reg = <0x7984000 0x1a000>;
++			interrupts = <0 101 0>;
++			clocks = <&gcc GCC_QPIC_AHB_CLK>;
++			clock-names = "bam_clk";
++			#dma-cells = <1>;
++			qcom,ee = <0>;
++			status = "disabled";
++		};
++
++		nand: qpic-nand at 79b0000 {
++			compatible = "qcom,ebi2-nandc-bam", "qcom,msm-nand";
++			reg = <0x79b0000 0x1000>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			clocks = <&gcc GCC_QPIC_CLK>,
++				<&gcc GCC_QPIC_AHB_CLK>;
++			clock-names = "core", "aon";
++
++			dmas = <&qpic_bam 0>,
++				<&qpic_bam 1>,
++				<&qpic_bam 2>;
++			dma-names = "tx", "rx", "cmd";
++			status = "disabled";
++
++			nandcs at 0 {
++				compatible = "qcom,nandcs";
++				reg = <0>;
++				#address-cells = <1>;
++				#size-cells = <1>;
++
++				nand-ecc-strength = <4>;
++				nand-ecc-step-size = <512>;
++				nand-bus-width = <8>;
++			};
++		};
+ 	};
+ };
+--
+2.7.2
+
diff --git a/target/linux/ipq806x/patches-4.9/863-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch b/target/linux/ipq806x/patches-4.9/863-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch
new file mode 100644
index 0000000..0b0319e
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/863-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch
@@ -0,0 +1,261 @@
+From e77af7de404eb464f7da9e0daeb8b362cc66a7ba Mon Sep 17 00:00:00 2001
+From: Ram Chandra Jangir <rjangir at codeaurora.org>
+Date: Tue, 9 May 2017 11:45:00 +0530
+Subject: [PATCH] msm: pinctrl: Add support to configure ipq40xx GPIO_PULL bits
+
+The GPIO_PULL bit configuration in the TLMM_GPIO_CFG register
+differs on IPQ40xx from the rest of the qcom SoCs.
+This change adds support to configure the msm_gpio_pull
+bits for ipq40xx; it is required to program the correct
+gpio-pull bit configuration for the NAND pin mux.
+
+IPQ40xx SoC:
+2'b10: Internal pull-up enable.
+2'b11: Unsupported
+
+For other SoCs:
+2'b10: Keeper
+2'b11: Pull-Up
+
+Signed-off-by: Ram Chandra Jangir <rjangir at codeaurora.org>
+---
+ drivers/pinctrl/qcom/pinctrl-apq8064.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-apq8084.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-ipq4019.c |  8 ++++++++
+ drivers/pinctrl/qcom/pinctrl-ipq8064.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-mdm9615.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-msm.c     | 21 ++++++++-------------
+ drivers/pinctrl/qcom/pinctrl-msm.h     | 19 +++++++++++++++++++
+ drivers/pinctrl/qcom/pinctrl-msm8660.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-msm8916.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-msm8960.c |  1 +
+ drivers/pinctrl/qcom/pinctrl-msm8x74.c |  1 +
+ 11 files changed, 43 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
+index cd96699..63e9a7e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
++++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
+@@ -597,6 +597,7 @@ static const struct msm_pinctrl_soc_data apq8064_pinctrl = {
+ 	.groups = apq8064_groups,
+ 	.ngroups = ARRAY_SIZE(apq8064_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int apq8064_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
+index d07e8df..892250e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
++++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
+@@ -1206,6 +1206,7 @@ static const struct msm_pinctrl_soc_data apq8084_pinctrl = {
+ 	.groups = apq8084_groups,
+ 	.ngroups = ARRAY_SIZE(apq8084_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int apq8084_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+index 571eb51..040e03c 100644
+--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
++++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+@@ -1531,6 +1531,13 @@ static const struct msm_pingroup ipq4019_groups[] = {
+ 	PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ };
+ 
++static const struct msm_pinctrl_gpio_pull ipq4019_gpio_pull = {
++	.no_pull = 0,
++	.pull_down = 1,
++	.keeper = 0,
++	.pull_up = 2,
++};
++
+ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
+ 	.pins = ipq4019_pins,
+ 	.npins = ARRAY_SIZE(ipq4019_pins),
+@@ -1539,6 +1546,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
+ 	.groups = ipq4019_groups,
+ 	.ngroups = ARRAY_SIZE(ipq4019_groups),
+ 	.ngpios = 100,
++	.gpio_pull = &ipq4019_gpio_pull,
+ };
+ 
+ static int ipq4019_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+index bcb29c0..a927251 100644
+--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
++++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+@@ -630,6 +630,7 @@ static const struct msm_pinctrl_soc_data ipq8064_pinctrl = {
+ 	.groups = ipq8064_groups,
+ 	.ngroups = ARRAY_SIZE(ipq8064_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int ipq8064_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+index 2b8f452..67e6b75 100644
+--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
++++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+@@ -444,6 +444,7 @@ static const struct msm_pinctrl_soc_data mdm9615_pinctrl = {
+ 	.groups = mdm9615_groups,
+ 	.ngroups = ARRAY_SIZE(mdm9615_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int mdm9615_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index c406b61..ae361a1 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -203,11 +203,6 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
+ 	return 0;
+ }
+ 
+-#define MSM_NO_PULL	0
+-#define MSM_PULL_DOWN	1
+-#define MSM_KEEPER	2
+-#define MSM_PULL_UP	3
+-
+ static unsigned msm_regval_to_drive(u32 val)
+ {
+ 	return (val + 1) * 2;
+@@ -238,16 +233,16 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
+ 	/* Convert register value to pinconf value */
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		arg = arg == MSM_NO_PULL;
++		arg = arg == pctrl->soc->gpio_pull->no_pull;
+ 		break;
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+-		arg = arg == MSM_PULL_DOWN;
++		arg = arg == pctrl->soc->gpio_pull->pull_down;
+ 		break;
+ 	case PIN_CONFIG_BIAS_BUS_HOLD:
+-		arg = arg == MSM_KEEPER;
++		arg = arg == pctrl->soc->gpio_pull->keeper;
+ 		break;
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+-		arg = arg == MSM_PULL_UP;
++		arg = arg == pctrl->soc->gpio_pull->pull_up;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_STRENGTH:
+ 		arg = msm_regval_to_drive(arg);
+@@ -304,16 +299,16 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
+ 		/* Convert pinconf values to register values */
+ 		switch (param) {
+ 		case PIN_CONFIG_BIAS_DISABLE:
+-			arg = MSM_NO_PULL;
++			arg = pctrl->soc->gpio_pull->no_pull;
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_DOWN:
+-			arg = MSM_PULL_DOWN;
++			arg = pctrl->soc->gpio_pull->pull_down;
+ 			break;
+ 		case PIN_CONFIG_BIAS_BUS_HOLD:
+-			arg = MSM_KEEPER;
++			arg = pctrl->soc->gpio_pull->keeper;
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_UP:
+-			arg = MSM_PULL_UP;
++			arg = pctrl->soc->gpio_pull->pull_up;
+ 			break;
+ 		case PIN_CONFIG_DRIVE_STRENGTH:
+ 			/* Check for invalid values */
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
+index 54fdd04..090aed9 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.h
++++ b/drivers/pinctrl/qcom/pinctrl-msm.h
+@@ -98,6 +98,16 @@ struct msm_pingroup {
+ };
+ 
+ /**
++ * struct msm_pinctrl_gpio_pull - pinctrl pull value bit field descriptor
++ */
++struct msm_pinctrl_gpio_pull {
++	unsigned no_pull;
++	unsigned pull_down;
++	unsigned keeper;
++	unsigned pull_up;
++};
++
++/**
+  * struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
+  * @pins:       An array describing all pins the pin controller affects.
+  * @npins:      The number of entries in @pins.
+@@ -106,6 +116,7 @@ struct msm_pingroup {
+  * @groups:     An array describing all pin groups the pin SoC supports.
+  * @ngroups:    The numbmer of entries in @groups.
+  * @ngpio:      The number of pingroups the driver should expose as GPIOs.
++ * @gpio_pull:  The pull value bit field descriptor.
+  */
+ struct msm_pinctrl_soc_data {
+ 	const struct pinctrl_pin_desc *pins;
+@@ -115,6 +126,14 @@ struct msm_pinctrl_soc_data {
+ 	const struct msm_pingroup *groups;
+ 	unsigned ngroups;
+ 	unsigned ngpios;
++	const struct msm_pinctrl_gpio_pull *gpio_pull;
++};
++
++static const struct msm_pinctrl_gpio_pull msm_gpio_pull = {
++	.no_pull = 0,
++	.pull_down = 1,
++	.keeper = 2,
++	.pull_up = 3,
+ };
+ 
+ int msm_pinctrl_probe(struct platform_device *pdev,
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
+index 5591d09..a8899d9 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
+@@ -979,6 +979,7 @@ static const struct msm_pinctrl_soc_data msm8660_pinctrl = {
+ 	.groups = msm8660_groups,
+ 	.ngroups = ARRAY_SIZE(msm8660_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int msm8660_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
+index 20ebf24..c45c2bb 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
+@@ -967,6 +967,7 @@ static const struct msm_pinctrl_soc_data msm8916_pinctrl = {
+ 	.groups = msm8916_groups,
+ 	.ngroups = ARRAY_SIZE(msm8916_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int msm8916_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
+index ed23e36..9411176 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
+@@ -1244,6 +1244,7 @@ static const struct msm_pinctrl_soc_data msm8960_pinctrl = {
+ 	.groups = msm8960_groups,
+ 	.ngroups = ARRAY_SIZE(msm8960_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int msm8960_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+index 9eb63d3..7740875 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+@@ -1069,6 +1069,7 @@ static const struct msm_pinctrl_soc_data msm8x74_pinctrl = {
+ 	.groups = msm8x74_groups,
+ 	.ngroups = ARRAY_SIZE(msm8x74_groups),
+ 	.ngpios = NUM_GPIO_PINGROUPS,
++	.gpio_pull = &msm_gpio_pull,
+ };
+ 
+ static int msm8x74_pinctrl_probe(struct platform_device *pdev)
+-- 
+2.7.2
+
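
To make the behavioural difference concrete, the sketch below (not part of the
patch) shows the pinconf-to-register translation the core now performs through
the per-SoC descriptor: bias-pull-up resolves to 2 on IPQ40xx and to 3 on the
other SoCs. The helper name is illustrative; the fields and values come from
the patch.

static unsigned example_pull_arg(const struct msm_pinctrl_soc_data *soc,
				 enum pin_config_param param)
{
	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		return soc->gpio_pull->no_pull;
	case PIN_CONFIG_BIAS_PULL_DOWN:
		return soc->gpio_pull->pull_down;
	case PIN_CONFIG_BIAS_BUS_HOLD:
		return soc->gpio_pull->keeper;
	case PIN_CONFIG_BIAS_PULL_UP:
		/* 2 on IPQ40xx (ipq4019_gpio_pull), 3 elsewhere (msm_gpio_pull) */
		return soc->gpio_pull->pull_up;
	default:
		return 0;
	}
}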
-- 
2.7.2



