[source] layerscape: add ls1088ardb device support
LEDE Commits
lede-commits at lists.infradead.org
Tue Jan 3 06:19:44 PST 2017
jow pushed a commit to source.git, branch master:
https://git.lede-project.org/1866368a8ab8cacf73aa47f67138040d5620439d
commit 1866368a8ab8cacf73aa47f67138040d5620439d
Author: Yutang Jiang <yutang.jiang at nxp.com>
AuthorDate: Sat Dec 24 01:11:32 2016 +0800
layerscape: add ls1088ardb device support
LS1088A is an ARMv8 implementation combining eight ARM Cortex-A53
processor cores. The LS1088ARDB is an evaluation platform that supports
the LS1088A family of SoCs.
Features summary:
- Eight 64-bit ARM v8 Cortex-A53 CPUs
- Data path acceleration architecture 2.0 (DPAA2)
- Ethernet interfaces
- QuadSPI flash, 3 PCIe, 2 USB, 1 SD, 2 DUARTs, etc.
Signed-off-by: Yutang Jiang <yutang.jiang at nxp.com>
---
target/linux/layerscape/config-4.4 | 13 +
target/linux/layerscape/image/Makefile | 14 +
...rm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch | 790 ++
.../patches-4.4/3139-ls1088ardb-add-ITS-file.patch | 69 +
.../3141-caam-add-caam-node-for-ls1088a.patch | 62 +
...-fsl-quadspi-Enable-fast-read-for-LS1088A.patch | 44 +
...l-arch_setup_dma_ops-before-using-dma_ops.patch | 53 +
...-mc-Added-generic-MSI-support-for-FSL-MC-.patch | 400 +
...-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch | 167 +
...-mc-Extended-MC-bus-allocator-to-include-.patch | 326 +
...-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch | 44 +
...-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch | 109 +
...ng-fsl-mc-set-MSI-domain-for-DPRC-objects.patch | 103 +
...fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch | 72 +
...aging-fsl-mc-Added-DPRC-interrupt-handler.patch | 301 +
...-mc-Added-MSI-support-to-the-MC-bus-drive.patch | 59 +
...taging-fsl-mc-Remove-unneeded-parentheses.patch | 39 +
...-fsl-mc-Do-not-allow-building-as-a-module.patch | 30 +
...156-staging-fsl-mc-Avoid-section-mismatch.patch | 43 +
...-mc-Remove-unneeded-else-following-a-retu.patch | 45 +
...ng-fsl-mc-Drop-unneeded-void-pointer-cast.patch | 43 +
...fsl-mc-bus-Eliminate-double-function-call.patch | 73 +
...ging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch | 96 +
...taging-fsl-mc-Replace-pr_err-with-dev_err.patch | 83 +
...-mc-fix-incorrect-type-passed-to-dev_dbg-.patch | 48 +
...-mc-fix-incorrect-type-passed-to-dev_err-.patch | 38 +
...-fsl-mc-get-rid-of-mutex_locked-variables.patch | 207 +
.../7165-staging-fsl-mc-TODO-updates.patch | 49 +
...aging-fsl-mc-DPAA2-overview-readme-update.patch | 279 +
...-mc-update-dpmcp-binary-interface-to-v3.0.patch | 123 +
...l-mc-update-dpbp-binary-interface-to-v2.2.patch | 208 +
...l-mc-update-dprc-binary-interface-to-v5.1.patch | 206 +
...-mc-don-t-use-object-versions-to-make-bin.patch | 136 +
...-mc-set-up-coherent-dma-ops-for-added-dev.patch | 29 +
...-mc-set-cacheable-flag-for-added-devices-.patch | 30 +
...-mc-get-version-of-root-dprc-from-MC-hard.patch | 106 +
...174-staging-fsl-mc-add-dprc-version-check.patch | 90 +
...-mc-add-quirk-handling-for-dpseci-objects.patch | 38 +
...76-staging-fsl-mc-add-dpmcp-version-check.patch | 56 +
...-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch | 30 +
.../7178-staging-fsl-mc-bus-Drop-warning.patch | 47 +
...-mc-add-support-for-the-modalias-sysfs-at.patch | 54 +
...-mc-implement-uevent-callback-and-set-the.patch | 32 +
...ging-fsl-mc-clean-up-the-device-id-struct.patch | 85 +
...-mc-add-support-for-device-table-matching.patch | 98 +
...7183-staging-fsl-mc-export-mc_get_version.patch | 23 +
...ng-fsl-mc-make-fsl_mc_is_root_dprc-global.patch | 77 +
...-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch | 62 +
...-staging-fsl-mc-dprc-add-missing-irq-free.patch | 28 +
...-mc-dprc-fix-ordering-problem-freeing-res.patch | 41 +
...fsl-mc-properly-set-hwirq-in-msi-set_desc.patch | 48 +
...-mc-update-dpcon-binary-interface-to-v2.2.patch | 964 ++
...-mc-root-dprc-rescan-attribute-to-sync-ke.patch | 59 +
...-mc-bus-rescan-attribute-to-sync-kernel-w.patch | 78 +
...-mc-Propagate-driver_override-for-a-child.patch | 193 +
...-mc-add-device-binding-path-driver_overri.patch | 111 +
...fsl-mc-export-irq-cleanup-for-vfio-to-use.patch | 47 +
...95-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch | 88 +
...g-fsl-mc-make-fsl_mc_get_root_dprc-public.patch | 45 +
...-fsl-mc-Management-Complex-restool-driver.patch | 489 +
.../7198-staging-fsl-mc-dpio-services-driver.patch | 8943 ++++++++++++++
.../7199-dpaa2-dpio-Cosmetic-cleanup.patch | 35 +
...aging-fsl-mc-dpio-driver-match-id-cleanup.patch | 26 +
...a2-eth-initial-commit-of-dpaa2-eth-driver.patch | 12268 +++++++++++++++++++
...sl-dpaa2-eth-code-cleanup-for-upstreaming.patch | 3257 +++++
...2-eth-Update-description-of-DPNI-counters.patch | 37 +
...sl-dpaa2-eth-dpni-Clear-compiler-warnings.patch | 38 +
...paa2-eth-sanitize-supported-private-flags.patch | 57 +
.../7206-fsl-dpaa2-eth-match-id-cleanup.patch | 26 +
...-fsl-dpaa2-eth-add-device-table-to-driver.patch | 22 +
...-dpaa2-mac-Added-MAC-PHY-interface-driver.patch | 2347 ++++
...ging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch | 182 +
...fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch | 42 +
...ng-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch | 42 +
...ing-fsl-dpaa2-mac-Use-of_property_read_32.patch | 43 +
...aging-fsl-dpaa2-mac-Remove-version-checks.patch | 61 +
...14-staging-fsl-dpaa2-mac-match-id-cleanup.patch | 26 +
...paa2-evb-Added-Edge-Virtual-Bridge-driver.patch | 2918 +++++
.../7216-dpaa2-evb-Fix-interrupt-handling.patch | 69 +
.../7217-dpaa2-evb-Add-object-version-check.patch | 43 +
.../7218-dpaa2-evb-Cosmetic-cleanup.patch | 20 +
.../7219-dpaa2-evb-match-id-cleanup.patch | 26 +
.../7220-dpaa2-ethsw-Ethernet-Switch-driver.patch | 6605 ++++++++++
.../7221-dpaa2-ethsw-match-id-cleanup.patch | 26 +
...thsw-fix-compile-error-on-backport-to-4.4.patch | 21 +
...dded-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch | 26 +
...Added-FSL-MC-specific-member-to-the-msi_d.patch | 40 +
...-dpaa2-evb-fix-4.4-backport-compile-error.patch | 21 +
...ers-mmc-Add-compatible-string-for-LS1088A.patch | 24 +
.../8137-armv8-ls1088a-Add-PCIe-compatible.patch | 38 +
90 files changed, 44749 insertions(+)
diff --git a/target/linux/layerscape/config-4.4 b/target/linux/layerscape/config-4.4
index 88da774..d334e42 100644
--- a/target/linux/layerscape/config-4.4
+++ b/target/linux/layerscape/config-4.4
@@ -169,6 +169,8 @@ CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
# CONFIG_IMX2_WDT is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_IOMMU_HELPER=y
@@ -296,3 +298,14 @@ CONFIG_XPS=y
CONFIG_ZLIB_INFLATE=y
CONFIG_MTD_SPI_NOR=y
CONFIG_SPI_FSL_QUADSPI=y
+CONFIG_FSL_MC_BUS=y
+CONFIG_FSL_MC_RESTOOL=y
+CONFIG_FSL_MC_DPIO=y
+# CONFIG_FSL_QBMAN_DEBUG is not set
+CONFIG_FSL_DPAA2=y
+CONFIG_FSL_DPAA2_ETH=y
+# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
+CONFIG_FSL_DPAA2_MAC=y
+# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
+CONFIG_FSL_DPAA2_EVB=y
+CONFIG_FSL_DPAA2_ETHSW=y
diff --git a/target/linux/layerscape/image/Makefile b/target/linux/layerscape/image/Makefile
index a06ae9e..c709557 100644
--- a/target/linux/layerscape/image/Makefile
+++ b/target/linux/layerscape/image/Makefile
@@ -92,4 +92,18 @@ endif
endef
TARGET_DEVICES += ls1012ardb
+define Device/ls1088ardb
+ DEVICE_TITLE := ls1088ardb-$(SUBTARGET)
+ DEVICE_PACKAGES += rcw-layerscape-ls1088ardb uboot-layerscape-$(SUBTARGET)-ls1088ardb mc-binary-ls1088ardb
+ifeq ($(SUBTARGET),64b)
+ DEVICE_DTS = freescale/fsl-ls1088a-rdb
+endif
+ifeq ($(SUBTARGET),32b)
+ DEVICE_DTS = ../../../arm64/boot/dts/freescale/fsl-ls1088a-rdb
+endif
+ IMAGE/firmware.bin = append-ls-dtb $$(DEVICE_DTS) | pad-to 1M | append-kernel | pad-to 6M | \
+ append-ls-rootfs-ext4 $(1) 17M | check-size 24117249
+endef
+TARGET_DEVICES += ls1088ardb
+
$(eval $(call BuildImage))
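For reference, the IMAGE recipe above places each piece of firmware.bin at a
fixed offset. The layout below is a sketch derived from the usual LEDE
pad-to/append/check-size helper semantics; the offsets are derived from the
recipe, not stated in the commit:

/* Sketch: firmware.bin layout implied by the ls1088ardb IMAGE recipe,
 * assuming standard LEDE pad-to/append semantics (derived, not from
 * the commit itself). */
#define LS1088ARDB_DTB_OFF      0x000000   /* append-ls-dtb                  */
#define LS1088ARDB_KERNEL_OFF   0x100000   /* pad-to 1M | append-kernel      */
#define LS1088ARDB_ROOTFS_OFF   0x600000   /* pad-to 6M | ext4 rootfs (17M)  */
#define LS1088ARDB_MAX_SIZE     24117249   /* check-size: 23 MiB + 1         */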
diff --git a/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch b/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch
new file mode 100644
index 0000000..c8aaeee
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/3135-arm64-Add-DTS-support-for-FSL-s-LS1088ARDB.patch
@@ -0,0 +1,790 @@
+From cbacf87fa6fb262c98033405f15697798c3a9c5d Mon Sep 17 00:00:00 2001
+From: Zhao Qiang <qiang.zhao at nxp.com>
+Date: Sun, 9 Oct 2016 14:31:50 +0800
+Subject: [PATCH 135/141] arm64: Add DTS support for FSL's LS1088ARDB
+
+Signed-off-by: Zhao Qiang <qiang.zhao at nxp.com>
+---
+ arch/arm64/boot/dts/freescale/Makefile | 1 +
+ arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 203 ++++++++
+ arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 557 +++++++++++++++++++++
+ 3 files changed, 761 insertions(+)
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+
+--- a/arch/arm64/boot/dts/freescale/Makefile
++++ b/arch/arm64/boot/dts/freescale/Makefile
+@@ -5,6 +5,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
+
+ always := $(dtb-y)
+ subdir-y := $(dts-dirs)
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+@@ -0,0 +1,203 @@
++/*
++ * Device Tree file for Freescale LS1088a RDB board
++ *
++ * Copyright (C) 2015, Freescale Semiconductor
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls1088a.dtsi"
++
++/ {
++ model = "Freescale Layerscape 1088a RDB Board";
++ compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&ifc {
++ status = "disabled";
++};
++
++&ftm0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@77 {
++ compatible = "philips,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ /* IRQ10_B */
++ interrupts = <0 150 0x4>;
++ };
++
++ adt7461a@4c {
++ compatible = "adt7461a";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&i2c2 {
++ status = "disabled";
++};
++
++&i2c3 {
++ status = "disabled";
++};
++
++&dspi {
++ status = "disabled";
++};
++
++&qspi {
++ status = "okay";
++ qflash0: s25fs512s@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++
++ qflash1: s25fs512s@1 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <1>;
++ };
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&serial0 {
++ status = "okay";
++};
++
++&serial1 {
++ status = "okay";
++};
++
++&emdio1 {
++ /* Freescale F104 PHY1 */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x1c>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x1d>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x1e>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x1f>;
++ phy-connection-type = "qsgmii";
++ };
++ /* F104 PHY2 */
++ mdio1_phy5: emdio1_phy@5 {
++ reg = <0x0c>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy6: emdio1_phy@6 {
++ reg = <0x0d>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy7: emdio1_phy@7 {
++ reg = <0x0e>;
++ phy-connection-type = "qsgmii";
++ };
++ mdio1_phy8: emdio1_phy@8 {
++ reg = <0x0f>;
++ phy-connection-type = "qsgmii";
++ };
++};
++
++&emdio2 {
++ /* Aquantia AQR105 10G PHY */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++};
++
++/* DPMAC connections to external PHYs
++ * based on LS1088A RM RevC - §24.1.2 SerDes Options
++ */
++/* DPMAC1 is 10G SFP+, fixed link */
++&dpmac2 {
++ phy-handle = <&mdio2_phy1>;
++};
++&dpmac3 {
++ phy-handle = <&mdio1_phy5>;
++};
++&dpmac4 {
++ phy-handle = <&mdio1_phy6>;
++};
++&dpmac5 {
++ phy-handle = <&mdio1_phy7>;
++};
++&dpmac6 {
++ phy-handle = <&mdio1_phy8>;
++};
++&dpmac7 {
++ phy-handle = <&mdio1_phy1>;
++};
++&dpmac8 {
++ phy-handle = <&mdio1_phy2>;
++};
++&dpmac9 {
++ phy-handle = <&mdio1_phy3>;
++};
++&dpmac10 {
++ phy-handle = <&mdio1_phy4>;
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -0,0 +1,557 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1088A family SoC.
++ *
++ * Copyright (C) 2015, Freescale Semiconductor
++ *
++ */
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++ compatible = "fsl,ls1088a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ /* We have 2 clusters having 4 Cortex-A53 cores each */
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x0>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x1>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x2>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x3>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu4: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x100>;
++ clocks = <&clockgen 1 1>;
++ };
++
++ cpu5: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x101>;
++ clocks = <&clockgen 1 1>;
++ };
++
++ cpu6: cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x102>;
++ clocks = <&clockgen 1 1>;
++ };
++
++ cpu7: cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53";
++ reg = <0x0 0x103>;
++ clocks = <&clockgen 1 1>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++ <0x0 0x06100000 0 0x100000>, /* GICR(RD_base+SGI_base)*/
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <1 9 0x4>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls2080a-clockgen", "fsl,ls1088a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ serial0: serial@21c0500 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ serial1: serial@21c0600 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ /* TODO: WRIOP (CCSR?) */
++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B96000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian; /* force the driver in LE mode */
++
++ /* Not necessary on the QDS, but needed on the RDB */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B97000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian; /* force the driver in LE mode */
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ ifc: ifc@2240000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x2240000 0x0 0x20000>;
++ interrupts = <0 21 0x4>; /* Level high type */
++ little-endian;
++ #address-cells = <2>;
++ #size-cells = <1>;
++
++ ranges = <0 0 0x5 0x80000000 0x08000000
++ 2 0 0x5 0x30000000 0x00010000
++ 3 0 0x5 0x20000000 0x00010000>;
++ };
++
++ esdhc: esdhc@2140000 {
++ compatible = "fsl,ls2080a-esdhc", "fsl,ls1088a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clock-frequency = <0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>;
++ interrupts = <0 44 4>;
++ };
++
++ reset: reset@1E60000 {
++ compatible = "fsl,ls-reset";
++ reg = <0x0 0x1E60000 0x0 0x10000>;
++ };
++
++ dspi: dspi@2100000 {
++ compatible = "fsl,ls2085a-dspi", "fsl,ls1088a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <0 26 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <0>;
++ };
++
++ i2c0: i2c@2000000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c1: i2c@2010000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c2: i2c@2020000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c3: i2c@2030000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ qspi: quadspi@20c0000 {
++ compatible = "fsl,ls2080a-qspi", "fsl,ls1088a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "qspi_en", "qspi";
++ };
++
++ pcie@3400000 {
++ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 108 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
++ <0000 0 0 2 &gic 0 0 0 110 4>,
++ <0000 0 0 3 &gic 0 0 0 111 4>,
++ <0000 0 0 4 &gic 0 0 0 112 4>;
++ };
++ pcie@3500000 {
++ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 113 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
++ <0000 0 0 2 &gic 0 0 0 115 4>,
++ <0000 0 0 3 &gic 0 0 0 116 4>,
++ <0000 0 0 4 &gic 0 0 0 117 4>;
++ };
++
++ pcie@3600000 {
++ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 118 0x4>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
++ <0000 0 0 2 &gic 0 0 0 120 4>,
++ <0000 0 0 3 &gic 0 0 0 121 4>,
++ <0000 0 0 4 &gic 0 0 0 122 4>;
++ };
++
++ sata0: sata@3200000 {
++ compatible = "fsl,ls1088a-ahci", "fsl,ls1043a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>;
++ interrupts = <0 133 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ };
++
++ usb0: usb3@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb3@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>,
++ <0 212 4>,
++ <0 213 4>,
++ <0 214 4>,
++ <0 215 4>,
++ <0 216 4>,
++ <0 217 4>,
++ <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>,
++ <0 147 4>,
++ <0 148 4>,
++ <0 149 4>,
++ <0 150 4>,
++ <0 151 4>,
++ <0 152 4>,
++ <0 153 4>,
++ <0 154 4>,
++ <0 155 4>,
++ <0 156 4>,
++ <0 157 4>,
++ <0 158 4>,
++ <0 159 4>,
++ <0 160 4>,
++ <0 161 4>,
++ <0 162 4>,
++ <0 163 4>,
++ <0 164 4>,
++ <0 165 4>,
++ <0 166 4>,
++ <0 167 4>,
++ <0 168 4>,
++ <0 169 4>,
++ <0 170 4>,
++ <0 171 4>,
++ <0 172 4>,
++ <0 173 4>,
++ <0 174 4>,
++ <0 175 4>,
++ <0 176 4>,
++ <0 177 4>,
++ <0 178 4>,
++ <0 179 4>,
++ <0 180 4>,
++ <0 181 4>,
++ <0 182 4>,
++ <0 183 4>,
++ <0 184 4>,
++ <0 185 4>,
++ <0 186 4>,
++ <0 187 4>,
++ <0 188 4>,
++ <0 189 4>,
++ <0 190 4>,
++ <0 191 4>,
++ <0 192 4>,
++ <0 193 4>,
++ <0 194 4>,
++ <0 195 4>,
++ <0 196 4>,
++ <0 197 4>,
++ <0 198 4>,
++ <0 199 4>,
++ <0 200 4>,
++ <0 201 4>,
++ <0 202 4>,
++ <0 203 4>,
++ <0 204 4>,
++ <0 205 4>,
++ <0 206 4>,
++ <0 207 4>,
++ <0 208 4>,
++ <0 209 4>;
++ mmu-masters = <&fsl_mc 0x300 0>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 0x1>,/*Phy Secure PPI, edge triggered*/
++ <1 14 0x1>, /*Phy Non-Secure PPI, edge triggered*/
++ <1 11 0x1>, /*Virtual PPI, edge triggered */
++ <1 10 0x1>; /*Hypervisor PPI, edge triggered */
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ #stream-id-cells = <2>;
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <1>;
++ };
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <2>;
++ };
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <3>;
++ };
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <4>;
++ };
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <5>;
++ };
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <6>;
++ };
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <7>;
++ };
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <8>;
++ };
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <9>;
++ };
++ dpmac10: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++ };
++ };
++
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ /* DRAM space 1 - 2 GB DRAM */
++ };
++};
diff --git a/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch b/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch
new file mode 100644
index 0000000..8b66c3e
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/3139-ls1088ardb-add-ITS-file.patch
@@ -0,0 +1,69 @@
+From caaab508dc2ba749d8394b5934353b1c47f37d75 Mon Sep 17 00:00:00 2001
+From: Zhao Qiang <qiang.zhao at nxp.com>
+Date: Sun, 9 Oct 2016 15:14:16 +0800
+Subject: [PATCH 139/141] ls1088ardb: add ITS file
+
+Signed-off-by: Zhao Qiang <qiang.zhao at nxp.com>
+---
+ kernel-ls1088a-rdb.its | 55 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+ create mode 100644 kernel-ls1088a-rdb.its
+
+--- /dev/null
++++ b/kernel-ls1088a-rdb.its
+@@ -0,0 +1,55 @@
++/*
++ * Copyright (C) 2015, Freescale Semiconductor
++ *
++ * Raghav Dogra <raghav at freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/dts-v1/;
++
++/ {
++ description = "Simulator Image file for the LS1088A Linux Kernel";
++ #address-cells = <1>;
++
++ images {
++ kernel@1 {
++ description = "ARM64 Linux kernel";
++ data = /incbin/("./arch/arm64/boot/Image.gz");
++ type = "kernel";
++ arch = "arm64";
++ os = "linux";
++ compression = "gzip";
++ load = <0x80080000>;
++ entry = <0x80080000>;
++ };
++ fdt@1 {
++ description = "Flattened Device Tree blob";
++ data = /incbin/("./arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dtb");
++ type = "flat_dt";
++ arch = "arm64";
++ compression = "none";
++ load = <0x90000000>;
++ };
++ ramdisk@1 {
++ description = "LS2 Ramdisk";
++ data = /incbin/("./fsl-image-core-ls1088ardb-be.ext2.gz");
++ type = "ramdisk";
++ arch = "arm64";
++ os = "linux";
++ compression = "none";
++ };
++ };
++
++ configurations {
++ default = "config@1";
++ config@1 {
++ description = "Boot Linux kernel";
++ kernel = "kernel@1";
++ fdt = "fdt@1";
++ ramdisk = "ramdisk@1";
++ };
++ };
++};
diff --git a/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch b/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch
new file mode 100644
index 0000000..386a321
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/3141-caam-add-caam-node-for-ls1088a.patch
@@ -0,0 +1,62 @@
+From 89b3b66aa955fed15585a4ba7120cf63f9e92aba Mon Sep 17 00:00:00 2001
+From: Zhao Qiang <qiang.zhao at nxp.com>
+Date: Thu, 13 Oct 2016 10:19:08 +0800
+Subject: [PATCH 141/141] caam: add caam node for ls1088a
+
+Signed-off-by: Zhao Qiang <qiang.zhao at nxp.com>
+---
+ arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 43 ++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -485,6 +485,49 @@
+ <1 10 0x1>; /*Hypervisor PPI, edge triggered */
+ };
+
++ crypto: crypto@8000000 {
++ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
++ "fsl,sec-v4.0";
++ fsl,sec-era = <8>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x8000000 0x100000>;
++ reg = <0x00 0x8000000 0x0 0x100000>;
++ interrupts = <0 139 0x4>;
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <0 140 0x4>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <0 141 0x4>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <0 142 0x4>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.4-job-ring",
++ "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <0 143 0x4>;
++ };
++ };
++
+ fsl_mc: fsl-mc@80c000000 {
+ compatible = "fsl,qoriq-mc";
+ #stream-id-cells = <2>;
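For context, the job rings added here are what the upstream CAAM driver hands
out to crypto consumers; a hedged sketch of that job-ring API (caam_jr_alloc()
and caam_jr_free() live in drivers/crypto/caam/jr.h; the wrapper function is
illustrative, not part of this commit):

#include <linux/err.h>
#include "jr.h" /* drivers/crypto/caam/jr.h */

/* Acquire the least-busy job ring (one of sec_jr0..sec_jr3 above). */
static int example_use_job_ring(void)
{
        struct device *jrdev = caam_jr_alloc();

        if (IS_ERR(jrdev))
                return PTR_ERR(jrdev);

        /* build descriptors and submit them with caam_jr_enqueue() */

        caam_jr_free(jrdev);
        return 0;
}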
diff --git a/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch b/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch
new file mode 100644
index 0000000..29f887a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/3226-mtd-spi-nor-fsl-quadspi-Enable-fast-read-for-LS1088A.patch
@@ -0,0 +1,44 @@
+From 72b250c04f543d4eeda06b32e699444b15cac5cc Mon Sep 17 00:00:00 2001
+From: "ying.zhang" <ying.zhang22455 at nxp.com>
+Date: Sat, 17 Dec 2016 00:39:28 +0800
+Subject: [PATCH 226/226] mtd:spi-nor:fsl-quadspi:Enable fast-read for
+ LS1088ARDB
+
+Add fast-read mode for the LS1088ARDB board.
+
+Signed-off-by: Yuan Yao <yao.yuan at nxp.com>
+Integrated-by: Jiang Yutang <yutang.jiang at nxp.com>
+---
+ arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 2 ++
+ arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+@@ -91,6 +91,7 @@
+ compatible = "spansion,m25p80";
+ #address-cells = <1>;
+ #size-cells = <1>;
++ m25p,fast-read;
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+@@ -99,6 +100,7 @@
+ compatible = "spansion,m25p80";
+ #address-cells = <1>;
+ #size-cells = <1>;
++ m25p,fast-read;
+ spi-max-frequency = <20000000>;
+ reg = <1>;
+ };
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -294,6 +294,7 @@
+ interrupts = <0 25 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "qspi_en", "qspi";
++ fsl,qspi-has-second-chip;
+ };
+
+ pcie at 3400000 {
+
diff --git a/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch b/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch
new file mode 100644
index 0000000..ef8cf01
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7144-dpaa-call-arch_setup_dma_ops-before-using-dma_ops.patch
@@ -0,0 +1,53 @@
+From 0ac69de37277aec31d18a8c7b9d9a3a65b629526 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu at nxp.com>
+Date: Wed, 12 Oct 2016 16:30:57 +0800
+Subject: [PATCH 144/226] dpaa: call arch_setup_dma_ops before using dma_ops
+
+A previous patch caused a dpaa call trace. This patch provides
+a temporary workaround until the issue is fixed upstream.
+
+Fixes: 1dccb598df54 ("arm64: simplify dma_get_ops")
+Signed-off-by: Yangbo Lu <yangbo.lu at nxp.com>
+---
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 12 ++++++------
+ drivers/staging/fsl_qbman/qman_high.c | 1 +
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
+@@ -754,6 +754,12 @@ dpa_bp_alloc(struct dpa_bp *dpa_bp)
+ goto pdev_register_failed;
+ }
+
++#ifdef CONFIG_FMAN_ARM
++ /* force coherency */
++ pdev->dev.archdata.dma_coherent = true;
++ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
++#endif
++
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+@@ -765,12 +771,6 @@ dpa_bp_alloc(struct dpa_bp *dpa_bp)
+ goto pdev_mask_failed;
+ }
+
+-#ifdef CONFIG_FMAN_ARM
+- /* force coherency */
+- pdev->dev.archdata.dma_coherent = true;
+- arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
+-#endif
+-
+ dpa_bp->dev = &pdev->dev;
+
+ if (dpa_bp->seed_cb) {
+--- a/drivers/staging/fsl_qbman/qman_high.c
++++ b/drivers/staging/fsl_qbman/qman_high.c
+@@ -662,6 +662,7 @@ struct qman_portal *qman_create_portal(
+ portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+ portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask;
+ #else
++ arch_setup_dma_ops(&portal->pdev->dev, 0, 0, NULL, false);
+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) {
+ pr_err("qman_portal - dma_set_mask() failed\n");
+ goto fail_devadd;
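The reordering matters because, after the upstream change cited in the Fixes
tag, arm64 hands back a dummy dma_ops table until arch_setup_dma_ops() has
run, so mask-setting calls made through those ops fail; the fixed sequence,
condensed into a sketch (the wrapper name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_dpa_dma_setup(struct platform_device *pdev)
{
        /* install the real dma_ops before any dma_set_*_mask() call */
        pdev->dev.archdata.dma_coherent = true;
        arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);

        return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
}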
diff --git a/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch b/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch
new file mode 100644
index 0000000..1f3a9f0
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7145-staging-fsl-mc-Added-generic-MSI-support-for-FSL-MC-.patch
@@ -0,0 +1,400 @@
+From 8ebb892cd56d14e72580ab36c3b5eb2d4603a7fe Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:21 -0600
+Subject: [PATCH 145/226] staging: fsl-mc: Added generic MSI support for
+ FSL-MC devices
+
+Created an MSI domain for the fsl-mc bus, including functions
+to create a domain, find a domain, alloc/free domain irqs, and
+bus-specific overrides for domain and irq_chip ops.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/Kconfig | 1 +
+ drivers/staging/fsl-mc/bus/Makefile | 1 +
+ drivers/staging/fsl-mc/bus/mc-msi.c | 276 +++++++++++++++++++++++++++
+ drivers/staging/fsl-mc/include/dprc.h | 2 +-
+ drivers/staging/fsl-mc/include/mc-private.h | 17 ++
+ drivers/staging/fsl-mc/include/mc.h | 17 ++
+ 6 files changed, 313 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-msi.c
+
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -9,6 +9,7 @@
+ config FSL_MC_BUS
+ tristate "Freescale Management Complex (MC) bus driver"
+ depends on OF && ARM64
++ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Driver to enable the bus infrastructure for the Freescale
+ QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -13,5 +13,6 @@ mc-bus-driver-objs := mc-bus.o \
+ dpmng.o \
+ dprc-driver.o \
+ mc-allocator.o \
++ mc-msi.o \
+ dpmcp.o \
+ dpbp.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-msi.c
+@@ -0,0 +1,276 @@
++/*
++ * Freescale Management Complex (MC) bus driver MSI support
++ *
++ * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera at freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc-private.h"
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/irqchip/arm-gic-v3.h>
++#include <linux/of_irq.h>
++#include <linux/irq.h>
++#include <linux/irqdomain.h>
++#include <linux/msi.h>
++#include "../include/mc-sys.h"
++#include "dprc-cmd.h"
++
++static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
++ struct msi_desc *desc)
++{
++ arg->desc = desc;
++ arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
++}
++
++static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
++{
++ struct msi_domain_ops *ops = info->ops;
++
++ if (WARN_ON(!ops))
++ return;
++
++ /*
++ * set_desc should not be set by the caller
++ */
++ if (WARN_ON(ops->set_desc))
++ return;
++
++ ops->set_desc = fsl_mc_msi_set_desc;
++}
++
++static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
++ struct fsl_mc_device_irq *mc_dev_irq)
++{
++ int error;
++ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
++ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
++ struct dprc_irq_cfg irq_cfg;
++
++ /*
++ * msi_desc->msg.address is 0x0 when this function is invoked in
++ * the free_irq() code path. In this case, for the MC, we don't
++ * really need to "unprogram" the MSI, so we just return.
++ */
++ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
++ return;
++
++ if (WARN_ON(!owner_mc_dev))
++ return;
++
++ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
++ msi_desc->msg.address_lo;
++ irq_cfg.val = msi_desc->msg.data;
++ irq_cfg.user_irq_id = msi_desc->irq;
++
++ if (owner_mc_dev == mc_bus_dev) {
++ /*
++ * IRQ is for the mc_bus_dev's DPRC itself
++ */
++ error = dprc_set_irq(mc_bus_dev->mc_io,
++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
++ mc_bus_dev->mc_handle,
++ mc_dev_irq->dev_irq_index,
++ &irq_cfg);
++ if (error < 0) {
++ dev_err(&owner_mc_dev->dev,
++ "dprc_set_irq() failed: %d\n", error);
++ }
++ } else {
++ /*
++ * IRQ is for a child device of mc_bus_dev
++ */
++ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
++ mc_bus_dev->mc_handle,
++ owner_mc_dev->obj_desc.type,
++ owner_mc_dev->obj_desc.id,
++ mc_dev_irq->dev_irq_index,
++ &irq_cfg);
++ if (error < 0) {
++ dev_err(&owner_mc_dev->dev,
++ "dprc_obj_set_irq() failed: %d\n", error);
++ }
++ }
++}
++
++/*
++ * NOTE: This function is invoked with interrupts disabled
++ */
++static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
++ struct msi_msg *msg)
++{
++ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ struct fsl_mc_device_irq *mc_dev_irq =
++ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
++
++ WARN_ON(mc_dev_irq->msi_desc != msi_desc);
++ msi_desc->msg = *msg;
++
++ /*
++ * Program the MSI (paddr, value) pair in the device:
++ */
++ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
++}
++
++static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
++{
++ struct irq_chip *chip = info->chip;
++
++ if (WARN_ON((!chip)))
++ return;
++
++ /*
++ * irq_write_msi_msg should not be set by the caller
++ */
++ if (WARN_ON(chip->irq_write_msi_msg))
++ return;
++
++ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
++}
++
++/**
++ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
++ * @fwnode: Optional fwnode of the interrupt controller
++ * @info: MSI domain info
++ * @parent: Parent irq domain
++ *
++ * Updates the domain and chip ops and creates a fsl-mc MSI
++ * interrupt domain.
++ *
++ * Returns:
++ * A domain pointer or NULL in case of failure.
++ */
++struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
++ struct msi_domain_info *info,
++ struct irq_domain *parent)
++{
++ struct irq_domain *domain;
++
++ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
++ fsl_mc_msi_update_dom_ops(info);
++ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
++ fsl_mc_msi_update_chip_ops(info);
++
++ domain = msi_create_irq_domain(fwnode, info, parent);
++ if (domain)
++ domain->bus_token = DOMAIN_BUS_FSL_MC_MSI;
++
++ return domain;
++}
++
++int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
++ struct irq_domain **mc_msi_domain)
++{
++ struct irq_domain *msi_domain;
++ struct device_node *mc_of_node = mc_platform_dev->of_node;
++
++ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
++ DOMAIN_BUS_FSL_MC_MSI);
++ if (!msi_domain) {
++ pr_err("Unable to find fsl-mc MSI domain for %s\n",
++ mc_of_node->full_name);
++
++ return -ENOENT;
++ }
++
++ *mc_msi_domain = msi_domain;
++ return 0;
++}
++
++static void fsl_mc_msi_free_descs(struct device *dev)
++{
++ struct msi_desc *desc, *tmp;
++
++ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
++ list_del(&desc->list);
++ free_msi_entry(desc);
++ }
++}
++
++static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
++
++{
++ unsigned int i;
++ int error;
++ struct msi_desc *msi_desc;
++
++ for (i = 0; i < irq_count; i++) {
++ msi_desc = alloc_msi_entry(dev);
++ if (!msi_desc) {
++ dev_err(dev, "Failed to allocate msi entry\n");
++ error = -ENOMEM;
++ goto cleanup_msi_descs;
++ }
++
++ msi_desc->fsl_mc.msi_index = i;
++ msi_desc->nvec_used = 1;
++ INIT_LIST_HEAD(&msi_desc->list);
++ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
++ }
++
++ return 0;
++
++cleanup_msi_descs:
++ fsl_mc_msi_free_descs(dev);
++ return error;
++}
++
++int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
++ unsigned int irq_count)
++{
++ struct irq_domain *msi_domain;
++ int error;
++
++ if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
++ return -EINVAL;
++
++ error = fsl_mc_msi_alloc_descs(dev, irq_count);
++ if (error < 0)
++ return error;
++
++ msi_domain = dev_get_msi_domain(dev);
++ if (WARN_ON(!msi_domain)) {
++ error = -EINVAL;
++ goto cleanup_msi_descs;
++ }
++
++ /*
++ * NOTE: Calling this function will trigger the invocation of the
++ * its_fsl_mc_msi_prepare() callback
++ */
++ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
++
++ if (error) {
++ dev_err(dev, "Failed to allocate IRQs\n");
++ goto cleanup_msi_descs;
++ }
++
++ return 0;
++
++cleanup_msi_descs:
++ fsl_mc_msi_free_descs(dev);
++ return error;
++}
++
++void fsl_mc_msi_domain_free_irqs(struct device *dev)
++{
++ struct irq_domain *msi_domain;
++
++ msi_domain = dev_get_msi_domain(dev);
++ if (WARN_ON(!msi_domain))
++ return;
++
++ msi_domain_free_irqs(msi_domain, dev);
++
++ if (WARN_ON(list_empty(dev_to_msi_list(dev))))
++ return;
++
++ fsl_mc_msi_free_descs(dev);
++}
+--- a/drivers/staging/fsl-mc/include/dprc.h
++++ b/drivers/staging/fsl-mc/include/dprc.h
+@@ -176,7 +176,7 @@ int dprc_reset_container(struct fsl_mc_i
+ * @user_irq_id: A user defined number associated with this IRQ
+ */
+ struct dprc_irq_cfg {
+- u64 paddr;
++ phys_addr_t paddr;
+ u32 val;
+ int user_irq_id;
+ };
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -26,6 +26,9 @@
+ strcmp(_obj_type, "dpmcp") == 0 || \
+ strcmp(_obj_type, "dpcon") == 0)
+
++struct irq_domain;
++struct msi_domain_info;
++
+ /**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: MC object device representing the root DPRC
+@@ -79,11 +82,13 @@ struct fsl_mc_resource_pool {
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
++ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ */
+ struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
++ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ };
+
+@@ -116,4 +121,16 @@ int __must_check fsl_mc_resource_allocat
+
+ void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
++struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
++ struct msi_domain_info *info,
++ struct irq_domain *parent);
++
++int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
++ struct irq_domain **mc_msi_domain);
++
++int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
++ unsigned int irq_count);
++
++void fsl_mc_msi_domain_free_irqs(struct device *dev);
++
+ #endif /* _FSL_MC_PRIVATE_H_ */
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -104,6 +104,23 @@ struct fsl_mc_resource {
+ };
+
+ /**
++ * struct fsl_mc_device_irq - MC object device message-based interrupt
++ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
++ * @mc_dev: MC object device that owns this interrupt
++ * @dev_irq_index: device-relative IRQ index
++ * @resource: MC generic resource associated with the interrupt
++ */
++struct fsl_mc_device_irq {
++ struct msi_desc *msi_desc;
++ struct fsl_mc_device *mc_dev;
++ u8 dev_irq_index;
++ struct fsl_mc_resource resource;
++};
++
++#define to_fsl_mc_irq(_mc_resource) \
++ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
++
++/**
+ * Bit masks for a MC object device (struct fsl_mc_device) flags
+ */
+ #define FSL_MC_IS_DPRC 0x0001
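Taken together, the intended flow is: find the fsl-mc MSI domain for the MC's
OF node, attach it to the DPRC device, then preallocate a block of MSIs
through it. A sketch using the functions added above (the probe wrapper and
the IRQ count are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include "../include/mc-private.h"

static int example_setup_dprc_msis(struct platform_device *pdev,
                                   struct fsl_mc_device *mc_bus_dev)
{
        struct irq_domain *msi_domain;
        int error;

        /* locate the DOMAIN_BUS_FSL_MC_MSI domain for this MC node */
        error = fsl_mc_find_msi_domain(&pdev->dev, &msi_domain);
        if (error < 0)
                return error;

        /* let dev_get_msi_domain() in the alloc path find it */
        dev_set_msi_domain(&mc_bus_dev->dev, msi_domain);

        /* back the DPRC with a block of ITS MSIs (count is illustrative) */
        return fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, 64);
}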
diff --git a/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch b/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch
new file mode 100644
index 0000000..5b254d7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7146-staging-fsl-mc-Added-GICv3-ITS-support-for-FSL-MC-MS.patch
@@ -0,0 +1,167 @@
+From 85cb8ae26b6c69f0a118f32b7b7cd4f22d782da3 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:22 -0600
+Subject: [PATCH 146/226] staging: fsl-mc: Added GICv3-ITS support for FSL-MC
+ MSIs
+
+Added platform-specific MSI support layer for FSL-MC devices.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/Makefile | 1 +
+ .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 127 ++++++++++++++++++++
+ drivers/staging/fsl-mc/include/mc-private.h | 4 +
+ 3 files changed, 132 insertions(+)
+ create mode 100644 drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -14,5 +14,6 @@ mc-bus-driver-objs := mc-bus.o \
+ dprc-driver.o \
+ mc-allocator.o \
+ mc-msi.o \
++ irq-gic-v3-its-fsl-mc-msi.o \
+ dpmcp.o \
+ dpbp.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -0,0 +1,127 @@
++/*
++ * Freescale Management Complex (MC) bus driver MSI support
++ *
++ * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera at freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc-private.h"
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/irqchip/arm-gic-v3.h>
++#include <linux/irq.h>
++#include <linux/msi.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include "../include/mc-sys.h"
++#include "dprc-cmd.h"
++
++static struct irq_chip its_msi_irq_chip = {
++ .name = "fsl-mc-bus-msi",
++ .irq_mask = irq_chip_mask_parent,
++ .irq_unmask = irq_chip_unmask_parent,
++ .irq_eoi = irq_chip_eoi_parent,
++ .irq_set_affinity = msi_domain_set_affinity
++};
++
++static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
++ struct device *dev,
++ int nvec, msi_alloc_info_t *info)
++{
++ struct fsl_mc_device *mc_bus_dev;
++ struct msi_domain_info *msi_info;
++
++ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
++
++ mc_bus_dev = to_fsl_mc_device(dev);
++ if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC)))
++ return -EINVAL;
++
++ /*
++ * Set the device Id to be passed to the GIC-ITS:
++ *
++ * NOTE: This device id corresponds to the IOMMU stream ID
++ * associated with the DPRC object (ICID).
++ */
++ info->scratchpad[0].ul = mc_bus_dev->icid;
++ msi_info = msi_get_domain_info(msi_domain->parent);
++ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
++}
++
++static struct msi_domain_ops its_fsl_mc_msi_ops = {
++ .msi_prepare = its_fsl_mc_msi_prepare,
++};
++
++static struct msi_domain_info its_fsl_mc_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
++ .ops = &its_fsl_mc_msi_ops,
++ .chip = &its_msi_irq_chip,
++};
++
++static const struct of_device_id its_device_id[] = {
++ { .compatible = "arm,gic-v3-its", },
++ {},
++};
++
++int __init its_fsl_mc_msi_init(void)
++{
++ struct device_node *np;
++ struct irq_domain *parent;
++ struct irq_domain *mc_msi_domain;
++
++ for (np = of_find_matching_node(NULL, its_device_id); np;
++ np = of_find_matching_node(np, its_device_id)) {
++ if (!of_property_read_bool(np, "msi-controller"))
++ continue;
++
++ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
++ if (!parent || !msi_get_domain_info(parent)) {
++ pr_err("%s: unable to locate ITS domain\n",
++ np->full_name);
++ continue;
++ }
++
++ mc_msi_domain = fsl_mc_msi_create_irq_domain(
++ of_node_to_fwnode(np),
++ &its_fsl_mc_msi_domain_info,
++ parent);
++ if (!mc_msi_domain) {
++ pr_err("%s: unable to create fsl-mc domain\n",
++ np->full_name);
++ continue;
++ }
++
++ WARN_ON(mc_msi_domain->
++ host_data != &its_fsl_mc_msi_domain_info);
++
++ pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
++ }
++
++ return 0;
++}
++
++void its_fsl_mc_msi_cleanup(void)
++{
++ struct device_node *np;
++
++ for (np = of_find_matching_node(NULL, its_device_id); np;
++ np = of_find_matching_node(np, its_device_id)) {
++ struct irq_domain *mc_msi_domain = irq_find_matching_host(
++ np,
++ DOMAIN_BUS_FSL_MC_MSI);
++
++ if (!of_property_read_bool(np, "msi-controller"))
++ continue;
++
++ mc_msi_domain = irq_find_matching_host(np,
++ DOMAIN_BUS_FSL_MC_MSI);
++ if (mc_msi_domain &&
++ mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
++ irq_domain_remove(mc_msi_domain);
++ }
++}
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -133,4 +133,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct
+
+ void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
++int __init its_fsl_mc_msi_init(void);
++
++void its_fsl_mc_msi_cleanup(void);
++
+ #endif /* _FSL_MC_PRIVATE_H_ */
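its_fsl_mc_msi_init() walks every "arm,gic-v3-its" node and stacks an fsl-mc
MSI domain on top of the ITS MSI domain, so it has to run before any DPRC is
scanned for interrupts; a sketch of the expected init ordering (the init
function name is illustrative):

#include <linux/init.h>
#include "../include/mc-private.h"

static int __init example_mc_bus_init(void)
{
        int error;

        /* create the DOMAIN_BUS_FSL_MC_MSI domains first */
        error = its_fsl_mc_msi_init();
        if (error < 0)
                return error;

        /* ...then register the fsl-mc bus driver and scan the root DPRC */
        return 0;
}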
diff --git a/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch b/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch
new file mode 100644
index 0000000..c02c892
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7147-staging-fsl-mc-Extended-MC-bus-allocator-to-include-.patch
@@ -0,0 +1,326 @@
+From 23b09c6b4162a8264b600f35d7048256a7afc0cd Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:23 -0600
+Subject: [PATCH 147/226] staging: fsl-mc: Extended MC bus allocator to
+ include IRQs
+
+All the IRQs for DPAA2 objects in the same DPRC must use
+the ICID of that DPRC as their device Id in the GIC-ITS.
+Thus, all these IRQs must share the same ITT table in the GIC.
+As a result, a pool of IRQs with the same device Id must be
+preallocated per DPRC (fsl-mc bus instance). So, the fsl-mc
+bus object allocator is extended to also provide services
+to allocate IRQs to DPAA2 devices, from their parent fsl-mc bus
+IRQ pool.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 199 +++++++++++++++++++++++++++
+ drivers/staging/fsl-mc/include/mc-private.h | 15 ++
+ drivers/staging/fsl-mc/include/mc.h | 9 ++
+ 3 files changed, 223 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -15,6 +15,7 @@
+ #include "../include/dpcon-cmd.h"
+ #include "dpmcp-cmd.h"
+ #include "dpmcp.h"
++#include <linux/msi.h>
+
+ /**
+ * fsl_mc_resource_pool_add_device - add allocatable device to a resource
+@@ -160,6 +161,7 @@ static const char *const fsl_mc_pool_typ
+ [FSL_MC_POOL_DPMCP] = "dpmcp",
+ [FSL_MC_POOL_DPBP] = "dpbp",
+ [FSL_MC_POOL_DPCON] = "dpcon",
++ [FSL_MC_POOL_IRQ] = "irq",
+ };
+
+ static int __must_check object_type_to_pool_type(const char *object_type,
+@@ -465,6 +467,203 @@ void fsl_mc_object_free(struct fsl_mc_de
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
++/*
++ * Initialize the interrupt pool associated with a MC bus.
++ * It allocates a block of IRQs from the GIC-ITS
++ */
++int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
++ unsigned int irq_count)
++{
++ unsigned int i;
++ struct msi_desc *msi_desc;
++ struct fsl_mc_device_irq *irq_resources;
++ struct fsl_mc_device_irq *mc_dev_irq;
++ int error;
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
++
++ if (WARN_ON(irq_count == 0 ||
++ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
++ return -EINVAL;
++
++ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
++ if (error < 0)
++ return error;
++
++ irq_resources = devm_kzalloc(&mc_bus_dev->dev,
++ sizeof(*irq_resources) * irq_count,
++ GFP_KERNEL);
++ if (!irq_resources) {
++ error = -ENOMEM;
++ goto cleanup_msi_irqs;
++ }
++
++ for (i = 0; i < irq_count; i++) {
++ mc_dev_irq = &irq_resources[i];
++
++ /*
++ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
++ * by the fsl_mc_msi_write_msg() callback
++ */
++ mc_dev_irq->resource.type = res_pool->type;
++ mc_dev_irq->resource.data = mc_dev_irq;
++ mc_dev_irq->resource.parent_pool = res_pool;
++ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
++ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
++ }
++
++ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
++ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
++ mc_dev_irq->msi_desc = msi_desc;
++ mc_dev_irq->resource.id = msi_desc->irq;
++ }
++
++ res_pool->max_count = irq_count;
++ res_pool->free_count = irq_count;
++ mc_bus->irq_resources = irq_resources;
++ return 0;
++
++cleanup_msi_irqs:
++ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
++ return error;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
++
++/**
++ * Teardown the interrupt pool associated with an MC bus.
++ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
++ */
++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
++{
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
++
++ if (WARN_ON(!mc_bus->irq_resources))
++ return;
++
++ if (WARN_ON(res_pool->max_count == 0))
++ return;
++
++ if (WARN_ON(res_pool->free_count != res_pool->max_count))
++ return;
++
++ INIT_LIST_HEAD(&res_pool->free_list);
++ res_pool->max_count = 0;
++ res_pool->free_count = 0;
++ mc_bus->irq_resources = NULL;
++ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
++}
++EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
++
++/**
++ * It allocates the IRQs required by a given MC object device. The
++ * IRQs are allocated from the interrupt pool associated with the
++ * MC bus that contains the device, if the device is not a DPRC device.
++ * Otherwise, the IRQs are allocated from the interrupt pool associated
++ * with the MC bus that represents the DPRC device itself.
++ */
++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
++{
++ int i;
++ int irq_count;
++ int res_allocated_count = 0;
++ int error = -EINVAL;
++ struct fsl_mc_device_irq **irqs = NULL;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_resource_pool *res_pool;
++
++ if (WARN_ON(mc_dev->irqs))
++ return -EINVAL;
++
++ irq_count = mc_dev->obj_desc.irq_count;
++ if (WARN_ON(irq_count == 0))
++ return -EINVAL;
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ mc_bus = to_fsl_mc_bus(mc_dev);
++ else
++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
++
++ if (WARN_ON(!mc_bus->irq_resources))
++ return -EINVAL;
++
++ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
++ if (res_pool->free_count < irq_count) {
++ dev_err(&mc_dev->dev,
++ "Not able to allocate %u irqs for device\n", irq_count);
++ return -ENOSPC;
++ }
++
++ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
++ GFP_KERNEL);
++ if (!irqs)
++ return -ENOMEM;
++
++ for (i = 0; i < irq_count; i++) {
++ struct fsl_mc_resource *resource;
++
++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
++ &resource);
++ if (error < 0)
++ goto error_resource_alloc;
++
++ irqs[i] = to_fsl_mc_irq(resource);
++ res_allocated_count++;
++
++ WARN_ON(irqs[i]->mc_dev);
++ irqs[i]->mc_dev = mc_dev;
++ irqs[i]->dev_irq_index = i;
++ }
++
++ mc_dev->irqs = irqs;
++ return 0;
++
++error_resource_alloc:
++ for (i = 0; i < res_allocated_count; i++) {
++ irqs[i]->mc_dev = NULL;
++ fsl_mc_resource_free(&irqs[i]->resource);
++ }
++
++ return error;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
++
++/*
++ * It frees the IRQs that were allocated for an MC object device by
++ * returning them to the corresponding interrupt pool.
++ */
++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
++{
++ int i;
++ int irq_count;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
++
++ if (WARN_ON(!irqs))
++ return;
++
++ irq_count = mc_dev->obj_desc.irq_count;
++
++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
++ mc_bus = to_fsl_mc_bus(mc_dev);
++ else
++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
++
++ if (WARN_ON(!mc_bus->irq_resources))
++ return;
++
++ for (i = 0; i < irq_count; i++) {
++ WARN_ON(!irqs[i]->mc_dev);
++ irqs[i]->mc_dev = NULL;
++ fsl_mc_resource_free(&irqs[i]->resource);
++ }
++
++ mc_dev->irqs = NULL;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
++
+ /**
+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+ * being added to the system
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -30,6 +30,16 @@ struct irq_domain;
+ struct msi_domain_info;
+
+ /**
++ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
++ * IRQ pool
++ */
++#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
++
++struct device_node;
++struct irq_domain;
++struct msi_domain_info;
++
++/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: MC object device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+@@ -137,4 +147,9 @@ int __init its_fsl_mc_msi_init(void);
+
+ void its_fsl_mc_msi_cleanup(void);
+
++int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
++ unsigned int irq_count);
++
++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
++
+ #endif /* _FSL_MC_PRIVATE_H_ */
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -14,12 +14,14 @@
+ #include <linux/device.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/list.h>
++#include <linux/interrupt.h>
+ #include "../include/dprc.h"
+
+ #define FSL_MC_VENDOR_FREESCALE 0x1957
+
+ struct fsl_mc_device;
+ struct fsl_mc_io;
++struct fsl_mc_bus;
+
+ /**
+ * struct fsl_mc_driver - MC object device driver object
+@@ -75,6 +77,7 @@ enum fsl_mc_pool_type {
+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
++ FSL_MC_POOL_IRQ,
+
+ /*
+ * NOTE: New resource pool types must be added before this entry
+@@ -141,6 +144,7 @@ struct fsl_mc_device_irq {
+ * NULL if none.
+ * @obj_desc: MC description of the DPAA device
+ * @regions: pointer to array of MMIO region entries
++ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
+ *
+ * Generic device object for MC object devices that are "attached" to a
+@@ -172,6 +176,7 @@ struct fsl_mc_device {
+ struct fsl_mc_io *mc_io;
+ struct dprc_obj_desc obj_desc;
+ struct resource *regions;
++ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
+ };
+
+@@ -215,6 +220,10 @@ int __must_check fsl_mc_object_allocate(
+
+ void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+
++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
++
++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
++
+ extern struct bus_type fsl_mc_bus_type;
+
+ #endif /* _FSL_MC_H_ */
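For illustration, here is a minimal sketch of how a DPAA2 object driver might
consume this pool through the interfaces added above. The dpdemo_* names are
hypothetical; fsl_mc_allocate_irqs(), fsl_mc_free_irqs() and the irqs[] array
come from this patch, while devm_request_irq() is the stock kernel helper:

    /* Hypothetical consumer of the MC bus IRQ pool (sketch, not upstream code) */
    #include <linux/interrupt.h>
    #include "../include/mc.h"

    static irqreturn_t dpdemo_irq_handler(int irq, void *arg)
    {
        /* A real driver would inspect its object's status here */
        return IRQ_HANDLED;
    }

    static int dpdemo_probe(struct fsl_mc_device *mc_dev)
    {
        int error;

        /* Draw this object's IRQs from its parent DPRC's pool */
        error = fsl_mc_allocate_irqs(mc_dev);
        if (error < 0)
            return error;

        /* Each pool entry carries a regular Linux IRQ number */
        error = devm_request_irq(&mc_dev->dev,
                                 mc_dev->irqs[0]->msi_desc->irq,
                                 dpdemo_irq_handler, 0,
                                 dev_name(&mc_dev->dev), mc_dev);
        if (error < 0)
            fsl_mc_free_irqs(mc_dev);
        return error;
    }

On remove, such a driver would call fsl_mc_free_irqs(mc_dev) to return the
interrupts to the DPRC's pool.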
diff --git a/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch b/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch
new file mode 100644
index 0000000..17dcb75
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7148-staging-fsl-mc-Changed-DPRC-built-in-portal-s-mc_io-.patch
@@ -0,0 +1,44 @@
+From 0f2a65dea2024b7898e3c0b42e0a7864d6538567 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:24 -0600
+Subject: [PATCH 148/226] staging: fsl-mc: Changed DPRC built-in portal's
+ mc_io to be atomic
+
+The DPRC built-in portal's mc_io is used to send commands to the MC
+to program MSIs for MC objects. This is done by the
+fsl_mc_msi_write_msg() callback, which is invoked by the generic MSI
+layer with interrupts disabled. As a result, the mc_io used in
+fsl_mc_msi_write_msg needs to be an atomic mc_io.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +++-
+ drivers/staging/fsl-mc/bus/mc-bus.c | 3 ++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -396,7 +396,9 @@ static int dprc_probe(struct fsl_mc_devi
+ error = fsl_create_mc_io(&mc_dev->dev,
+ mc_dev->regions[0].start,
+ region_size,
+- NULL, 0, &mc_dev->mc_io);
++ NULL,
++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
+ }
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -702,7 +702,8 @@ static int fsl_mc_bus_probe(struct platf
+ mc_portal_phys_addr = res.start;
+ mc_portal_size = resource_size(&res);
+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+- mc_portal_size, NULL, 0, &mc_io);
++ mc_portal_size, NULL,
++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+ if (error < 0)
+ return error;
+
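For context, a hedged sketch of creating such a portal; the assumption
(consistent with the mc-sys code touched later in this series) is that
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL selects spinlock serialization and
udelay()-based completion polling, so the portal may be used where sleeping
is forbidden:

    /* Sketch: create an MC portal usable from atomic context.
     * Region index 0 is assumed for brevity. */
    static int demo_create_atomic_portal(struct fsl_mc_device *mc_dev,
                                         struct fsl_mc_io **new_mc_io)
    {
        return fsl_create_mc_io(&mc_dev->dev,
                                mc_dev->regions[0].start,
                                resource_size(&mc_dev->regions[0]),
                                NULL, /* no DPMCP portal object */
                                FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
                                new_mc_io);
    }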
diff --git a/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch b/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch
new file mode 100644
index 0000000..e72d5a7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7149-staging-fsl-mc-Populate-the-IRQ-pool-for-an-MC-bus-i.patch
@@ -0,0 +1,109 @@
+From 78ab7589777526022757e9c95b9d5864786eb4e5 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:25 -0600
+Subject: [PATCH 149/226] staging: fsl-mc: Populate the IRQ pool for an MC bus
+ instance
+
+Scan the corresponding DPRC container to get the total count
+of IRQs needed by all its child DPAA2 objects. Then,
+preallocate a set of MSI IRQs with the DPRC's ICID
+(GIC-ITS device Id) to populate the DPRC's IRQ pool.
+Each child DPAA2 object in the DPRC and the DPRC object itself
+will allocate their necessary MSI IRQs from the DPRC's IRQ pool
+in their driver probe function.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 24 ++++++++++++++++++++++--
+ drivers/staging/fsl-mc/include/mc-private.h | 3 ++-
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -241,6 +241,7 @@ static void dprc_cleanup_all_resource_po
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+ * state of the Linux bus driver, MC by adding and removing
+@@ -254,11 +255,13 @@ static void dprc_cleanup_all_resource_po
+ * populated before they can get allocation requests from probe callbacks
+ * of the device drivers for the non-allocatable devices.
+ */
+-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ unsigned int *total_irq_count)
+ {
+ int num_child_objects;
+ int dprc_get_obj_failures;
+ int error;
++ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+ struct dprc_obj_desc *child_obj_desc_array = NULL;
+
+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
+@@ -307,6 +310,7 @@ int dprc_scan_objects(struct fsl_mc_devi
+ continue;
+ }
+
++ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
+ obj_desc->type, obj_desc->id);
+@@ -319,6 +323,7 @@ int dprc_scan_objects(struct fsl_mc_devi
+ }
+ }
+
++ *total_irq_count = irq_count;
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+@@ -344,6 +349,7 @@ EXPORT_SYMBOL_GPL(dprc_scan_objects);
+ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+ {
+ int error;
++ unsigned int irq_count;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ dprc_init_all_resource_pools(mc_bus_dev);
+@@ -352,11 +358,25 @@ int dprc_scan_container(struct fsl_mc_de
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+- error = dprc_scan_objects(mc_bus_dev);
++ error = dprc_scan_objects(mc_bus_dev, &irq_count);
+ mutex_unlock(&mc_bus->scan_mutex);
+ if (error < 0)
+ goto error;
+
++ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
++ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++ dev_warn(&mc_bus_dev->dev,
++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ }
++
++ error = fsl_mc_populate_irq_pool(
++ mc_bus,
++ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ if (error < 0)
++ goto error;
++ }
++
+ return 0;
+ error:
+ dprc_cleanup_all_resource_pools(mc_bus_dev);
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -114,7 +114,8 @@ void fsl_mc_device_remove(struct fsl_mc_
+
+ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
+
+-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev);
++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ unsigned int *total_irq_count);
+
+ int __init dprc_driver_init(void);
+
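Putting the hunks together, the control flow in dprc_scan_container() after
this patch is roughly the following condensed sketch (variables as in the
function above; error handling trimmed):

    unsigned int irq_count;
    int error;

    /* Count the IRQs needed by the DPRC's children while scanning */
    mutex_lock(&mc_bus->scan_mutex);
    error = dprc_scan_objects(mc_bus_dev, &irq_count);
    mutex_unlock(&mc_bus->scan_mutex);

    /* Populate the pool once per bus, at the fixed preallocation size */
    if (!error && dev_get_msi_domain(&mc_bus_dev->dev) &&
        !mc_bus->irq_resources)
        error = fsl_mc_populate_irq_pool(mc_bus,
                                         FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);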
diff --git a/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch b/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch
new file mode 100644
index 0000000..0c69783
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7150-staging-fsl-mc-set-MSI-domain-for-DPRC-objects.patch
@@ -0,0 +1,103 @@
+From 15bfab2641c61fb50a876860e8909ab84d2b8701 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:26 -0600
+Subject: [PATCH 150/226] staging: fsl-mc: set MSI domain for DPRC objects
+
+The MSI domain associated with a root DPRC object is
+obtained from the device tree. Child DPRCs inherit
+the parent DPRC's MSI domain.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 39 ++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -13,6 +13,7 @@
+ #include "../include/mc-sys.h"
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/interrupt.h>
+ #include "dprc-cmd.h"
+
+ struct dprc_child_objs {
+@@ -398,11 +399,16 @@ static int dprc_probe(struct fsl_mc_devi
+ {
+ int error;
+ size_t region_size;
++ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
++ bool msi_domain_set = false;
+
+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ return -EINVAL;
+
++ if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
++ return -EINVAL;
++
+ if (!mc_dev->mc_io) {
+ /*
+ * This is a child DPRC:
+@@ -421,6 +427,30 @@ static int dprc_probe(struct fsl_mc_devi
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
++ /*
++ * Inherit parent MSI domain:
++ */
++ dev_set_msi_domain(&mc_dev->dev,
++ dev_get_msi_domain(parent_dev));
++ msi_domain_set = true;
++ } else {
++ /*
++ * This is a root DPRC
++ */
++ struct irq_domain *mc_msi_domain;
++
++ if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type))
++ return -EINVAL;
++
++ error = fsl_mc_find_msi_domain(parent_dev,
++ &mc_msi_domain);
++ if (error < 0) {
++ dev_warn(&mc_dev->dev,
++ "WARNING: MC bus without interrupt support\n");
++ } else {
++ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
++ msi_domain_set = true;
++ }
+ }
+
+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+@@ -446,6 +476,9 @@ error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+ error_cleanup_mc_io:
++ if (msi_domain_set)
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ return error;
+ }
+@@ -463,6 +496,7 @@ error_cleanup_mc_io:
+ static int dprc_remove(struct fsl_mc_device *mc_dev)
+ {
+ int error;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ return -EINVAL;
+@@ -475,6 +509,11 @@ static int dprc_remove(struct fsl_mc_dev
+ if (error < 0)
+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
++ if (dev_get_msi_domain(&mc_dev->dev)) {
++ fsl_mc_cleanup_irq_pool(mc_bus);
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++ }
++
+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+ return 0;
+ }
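The domain plumbing reduces to a small pattern; a simplified sketch of the
two cases handled in dprc_probe() above:

    if (!mc_dev->mc_io) {
        /* Child DPRC: inherit the MSI domain of the parent DPRC */
        dev_set_msi_domain(&mc_dev->dev,
                           dev_get_msi_domain(mc_dev->dev.parent));
    } else {
        /* Root DPRC: look the domain up from the device tree */
        struct irq_domain *mc_msi_domain;

        if (fsl_mc_find_msi_domain(mc_dev->dev.parent,
                                   &mc_msi_domain) == 0)
            dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
        /* otherwise the bus runs without interrupt support */
    }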
diff --git a/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch b/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch
new file mode 100644
index 0000000..3324048
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7151-staging-fsl-mc-Fixed-bug-in-dprc_probe-error-path.patch
@@ -0,0 +1,72 @@
+From 22aa842ae501ea8724afd45fcb0d7b17a67cb950 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:27 -0600
+Subject: [PATCH 151/226] staging: fsl-mc: Fixed bug in dprc_probe() error
+ path
+
+Destroy mc_io in error path in dprc_probe() only if the mc_io was
+created in this function.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -401,6 +401,7 @@ static int dprc_probe(struct fsl_mc_devi
+ size_t region_size;
+ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
++ bool mc_io_created = false;
+ bool msi_domain_set = false;
+
+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+@@ -413,6 +414,9 @@ static int dprc_probe(struct fsl_mc_devi
+ /*
+ * This is a child DPRC:
+ */
++ if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
++
+ if (WARN_ON(mc_dev->obj_desc.region_count == 0))
+ return -EINVAL;
+
+@@ -427,6 +431,9 @@ static int dprc_probe(struct fsl_mc_devi
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
++
++ mc_io_created = true;
++
+ /*
+ * Inherit parent MSI domain:
+ */
+@@ -457,7 +464,7 @@ static int dprc_probe(struct fsl_mc_devi
+ &mc_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+- goto error_cleanup_mc_io;
++ goto error_cleanup_msi_domain;
+ }
+
+ mutex_init(&mc_bus->scan_mutex);
+@@ -475,11 +482,15 @@ static int dprc_probe(struct fsl_mc_devi
+ error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+-error_cleanup_mc_io:
++error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+- fsl_destroy_mc_io(mc_dev->mc_io);
++ if (mc_io_created) {
++ fsl_destroy_mc_io(mc_dev->mc_io);
++ mc_dev->mc_io = NULL;
++ }
++
+ return error;
+ }
+
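The fix applies a general rule: an error path should only unwind state that
the current function itself set up. A minimal generic sketch of the
tracking-flag idiom, with all demo_* names hypothetical:

    #include <linux/types.h>

    struct demo_io;
    struct demo_dev {
        struct demo_io *io;
    };

    int demo_create_io(struct demo_dev *dev, struct demo_io **io);
    int demo_open(struct demo_dev *dev);
    void demo_destroy_io(struct demo_io *io);

    static int demo_probe(struct demo_dev *dev)
    {
        bool io_created = false;
        int error;

        if (!dev->io) {
            error = demo_create_io(dev, &dev->io);
            if (error < 0)
                return error;
            io_created = true; /* this function owns the io instance */
        }

        error = demo_open(dev);
        if (error < 0)
            goto error_cleanup_io;
        return 0;

    error_cleanup_io:
        /* Destroy the io only if this function created it */
        if (io_created) {
            demo_destroy_io(dev->io);
            dev->io = NULL;
        }
        return error;
    }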
diff --git a/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch b/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch
new file mode 100644
index 0000000..61b7ee7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7152-staging-fsl-mc-Added-DPRC-interrupt-handler.patch
@@ -0,0 +1,301 @@
+From aa83997b14c31b34d9af24cb42726b55fa630464 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:28 -0600
+Subject: [PATCH 152/226] staging: fsl-mc: Added DPRC interrupt handler
+
+The interrupt handler for DPRC IRQs is added. DPRC IRQs are
+generated for hot plug events related to DPAA2 objects in a given
+DPRC. These events include creating/destroying DPAA2 objects in
+the DPRC, changing the "plugged" state of DPAA2 objects, and moving
+objects between DPRCs.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 247 ++++++++++++++++++++++++++++++
+ 1 file changed, 247 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
++#include <linux/msi.h>
+ #include "dprc-cmd.h"
+
+ struct dprc_child_objs {
+@@ -386,6 +387,230 @@ error:
+ EXPORT_SYMBOL_GPL(dprc_scan_container);
+
+ /**
++ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
++ *
++ * @irq: IRQ number of the interrupt being handled
++ * @arg: Pointer to device structure
++ */
++static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++/**
++ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
++ *
++ * @irq: IRQ number of the interrupt being handled
++ * @arg: Pointer to device structure
++ */
++static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
++{
++ int error;
++ u32 status;
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
++ struct fsl_mc_io *mc_io = mc_dev->mc_io;
++ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
++
++ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
++ irq_num, smp_processor_id());
++
++ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
++ return IRQ_HANDLED;
++
++ mutex_lock(&mc_bus->scan_mutex);
++ if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
++ goto out;
++
++ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
++ &status);
++ if (error < 0) {
++ dev_err(dev,
++ "dprc_get_irq_status() failed: %d\n", error);
++ goto out;
++ }
++
++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
++ status);
++ if (error < 0) {
++ dev_err(dev,
++ "dprc_clear_irq_status() failed: %d\n", error);
++ goto out;
++ }
++
++ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
++ DPRC_IRQ_EVENT_OBJ_REMOVED |
++ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
++ DPRC_IRQ_EVENT_OBJ_DESTROYED |
++ DPRC_IRQ_EVENT_OBJ_CREATED)) {
++ unsigned int irq_count;
++
++ error = dprc_scan_objects(mc_dev, &irq_count);
++ if (error < 0) {
++ /*
++ * If the error is -ENXIO, we ignore it, as it indicates
++ * that the object scan was aborted, as we detected that
++ * an object was removed from the DPRC in the MC, while
++ * we were scanning the DPRC.
++ */
++ if (error != -ENXIO) {
++ dev_err(dev, "dprc_scan_objects() failed: %d\n",
++ error);
++ }
++
++ goto out;
++ }
++
++ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++ dev_warn(dev,
++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ }
++ }
++
++out:
++ mutex_unlock(&mc_bus->scan_mutex);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Disable and clear interrupt for a given DPRC object
++ */
++static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
++{
++ int error;
++ struct fsl_mc_io *mc_io = mc_dev->mc_io;
++
++ WARN_ON(mc_dev->obj_desc.irq_count != 1);
++
++ /*
++ * Disable generation of interrupt, while we configure it:
++ */
++ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /*
++ * Disable all interrupt causes for the interrupt:
++ */
++ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /*
++ * Clear any leftover interrupts:
++ */
++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
++ error);
++ return error;
++ }
++
++ return 0;
++}
++
++static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
++{
++ int error;
++ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
++
++ WARN_ON(mc_dev->obj_desc.irq_count != 1);
++
++ /*
++ * NOTE: devm_request_threaded_irq() invokes the device-specific
++ * function that programs the MSI physically in the device
++ */
++ error = devm_request_threaded_irq(&mc_dev->dev,
++ irq->msi_desc->irq,
++ dprc_irq0_handler,
++ dprc_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ "FSL MC DPRC irq0",
++ &mc_dev->dev);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "devm_request_threaded_irq() failed: %d\n",
++ error);
++ return error;
++ }
++
++ return 0;
++}
++
++static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
++{
++ int error;
++
++ /*
++ * Enable all interrupt causes for the interrupt:
++ */
++ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
++ ~0x0u);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
++ error);
++
++ return error;
++ }
++
++ /*
++ * Enable generation of the interrupt:
++ */
++ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
++ error);
++
++ return error;
++ }
++
++ return 0;
++}
++
++/*
++ * Setup interrupt for a given DPRC device
++ */
++static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
++{
++ int error;
++
++ error = fsl_mc_allocate_irqs(mc_dev);
++ if (error < 0)
++ return error;
++
++ error = disable_dprc_irq(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
++
++ error = register_dprc_irq_handler(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
++
++ error = enable_dprc_irq(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
++
++ return 0;
++
++error_free_irqs:
++ fsl_mc_free_irqs(mc_dev);
++ return error;
++}
++
++/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+@@ -476,6 +701,13 @@ static int dprc_probe(struct fsl_mc_devi
+ if (error < 0)
+ goto error_cleanup_open;
+
++ /*
++ * Configure interrupt for the DPRC object associated with this MC bus:
++ */
++ error = dprc_setup_irq(mc_dev);
++ if (error < 0)
++ goto error_cleanup_open;
++
+ dev_info(&mc_dev->dev, "DPRC device bound to driver");
+ return 0;
+
+@@ -494,6 +726,15 @@ error_cleanup_msi_domain:
+ return error;
+ }
+
++/*
++ * Tear down interrupt for a given DPRC object
++ */
++static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
++{
++ (void)disable_dprc_irq(mc_dev);
++ fsl_mc_free_irqs(mc_dev);
++}
++
+ /**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+@@ -514,6 +755,12 @@ static int dprc_remove(struct fsl_mc_dev
+ if (WARN_ON(!mc_dev->mc_io))
+ return -EINVAL;
+
++ if (WARN_ON(!mc_bus->irq_resources))
++ return -EINVAL;
++
++ if (dev_get_msi_domain(&mc_dev->dev))
++ dprc_teardown_irq(mc_dev);
++
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+ dprc_cleanup_all_resource_pools(mc_dev);
+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
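The two handlers above follow the standard threaded-IRQ split: the hard
handler runs in interrupt context and only wakes the thread, while the thread
function may sleep (take scan_mutex, send MC commands). A generic sketch of
the idiom, demo_* names hypothetical:

    #include <linux/interrupt.h>

    static irqreturn_t demo_hardirq(int irq, void *arg)
    {
        /* Interrupt context: defer all real work to the thread */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread_fn(int irq, void *arg)
    {
        /* Process context: sleeping, mutexes and MC commands are fine */
        return IRQ_HANDLED;
    }

    static int demo_setup_irq(struct device *dev, unsigned int irq)
    {
        /* IRQF_ONESHOT keeps the IRQ masked until the thread returns */
        return devm_request_threaded_irq(dev, irq, demo_hardirq,
                                         demo_thread_fn,
                                         IRQF_NO_SUSPEND | IRQF_ONESHOT,
                                         "demo-irq0", dev);
    }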
diff --git a/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch b/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch
new file mode 100644
index 0000000..cbc6c5e
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7153-staging-fsl-mc-Added-MSI-support-to-the-MC-bus-drive.patch
@@ -0,0 +1,59 @@
+From f588a135d9260f2e7fe29b0bb0b5294fc9c99f6c Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:29 -0600
+Subject: [PATCH 153/226] staging: fsl-mc: Added MSI support to the MC bus
+ driver
+
+Initialize/Cleanup ITS-MSI support for the MC bus driver at driver
+init/exit time. Associate an MSI domain with each DPAA2 child device.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -16,6 +16,8 @@
+ #include <linux/ioport.h>
+ #include <linux/slab.h>
+ #include <linux/limits.h>
++#include <linux/bitops.h>
++#include <linux/msi.h>
+ #include "../include/dpmng.h"
+ #include "../include/mc-sys.h"
+ #include "dprc-cmd.h"
+@@ -472,6 +474,8 @@ int fsl_mc_device_add(struct dprc_obj_de
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
++ dev_set_msi_domain(&mc_dev->dev,
++ dev_get_msi_domain(&parent_mc_dev->dev));
+ }
+
+ /*
+@@ -833,8 +837,15 @@ static int __init fsl_mc_bus_driver_init
+ if (error < 0)
+ goto error_cleanup_dprc_driver;
+
++ error = its_fsl_mc_msi_init();
++ if (error < 0)
++ goto error_cleanup_mc_allocator;
++
+ return 0;
+
++error_cleanup_mc_allocator:
++ fsl_mc_allocator_driver_exit();
++
+ error_cleanup_dprc_driver:
+ dprc_driver_exit();
+
+@@ -856,6 +867,7 @@ static void __exit fsl_mc_bus_driver_exi
+ if (WARN_ON(!mc_dev_cache))
+ return;
+
++ its_fsl_mc_msi_cleanup();
+ fsl_mc_allocator_driver_exit();
+ dprc_driver_exit();
+ platform_driver_unregister(&fsl_mc_bus_driver);
diff --git a/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch b/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch
new file mode 100644
index 0000000..64af81c
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7154-staging-fsl-mc-Remove-unneeded-parentheses.patch
@@ -0,0 +1,39 @@
+From 6ce3c078c4eac406b38de689c8e366d7345a51ba Mon Sep 17 00:00:00 2001
+From: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Date: Thu, 11 Feb 2016 18:00:25 -0500
+Subject: [PATCH 154/226] staging: fsl-mc: Remove unneeded parentheses
+
+Remove unneeded parentheses on the right hand side of assignment
+statements.
+Semantic patch:
+
+@@
+expression a, b, c;
+@@
+
+(
+ a = (b == c)
+|
+ a =
+- (
+ b
+- )
+)
+
+Signed-off-by: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -129,7 +129,7 @@ static void check_plugged_state_change(s
+ {
+ int error;
+ u32 plugged_flag_at_mc =
+- (obj_desc->state & DPRC_OBJ_STATE_PLUGGED);
++ obj_desc->state & DPRC_OBJ_STATE_PLUGGED;
+
+ if (plugged_flag_at_mc !=
+ (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
diff --git a/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch b/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch
new file mode 100644
index 0000000..1ae6253
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7155-staging-fsl-mc-Do-not-allow-building-as-a-module.patch
@@ -0,0 +1,30 @@
+From 322ff2fe86ec4dead2d2bceb20b624c72bdd1405 Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding at nvidia.com>
+Date: Mon, 15 Feb 2016 14:22:22 +0100
+Subject: [PATCH 155/226] staging: fsl-mc: Do not allow building as a module
+
+This driver uses functionality (MSI IRQ domain) whose symbols aren't
+exported, and hence the modular build fails. While arguably there might
+be reasons to make these symbols available to modules, that change would
+be fairly involved and the set of exported functions should be carefully
+audited. Fix the build failure for now by marking the driver boolean.
+
+Cc: J. German Rivera <German.Rivera at freescale.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Signed-off-by: Thierry Reding <treding at nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -7,7 +7,7 @@
+ #
+
+ config FSL_MC_BUS
+- tristate "Freescale Management Complex (MC) bus driver"
++ bool "Freescale Management Complex (MC) bus driver"
+ depends on OF && ARM64
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
diff --git a/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch b/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch
new file mode 100644
index 0000000..60fecd9
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7156-staging-fsl-mc-Avoid-section-mismatch.patch
@@ -0,0 +1,43 @@
+From b2e5cfb43faf26517d191de65121f1a40166340f Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding at nvidia.com>
+Date: Mon, 15 Feb 2016 14:22:23 +0100
+Subject: [PATCH 156/226] staging: fsl-mc: Avoid section mismatch
+
+The fsl_mc_allocator_driver_exit() function is marked __exit, but is
+called by the error handling code in fsl_mc_allocator_driver_init().
+This results in a section mismatch, which in turn could lead to
+executing random code.
+
+Remove the __exit annotation to fix this.
+
+Cc: J. German Rivera <German.Rivera at freescale.com>
+Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+Signed-off-by: Thierry Reding <treding at nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 2 +-
+ drivers/staging/fsl-mc/include/mc-private.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -756,7 +756,7 @@ int __init fsl_mc_allocator_driver_init(
+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+ }
+
+-void __exit fsl_mc_allocator_driver_exit(void)
++void fsl_mc_allocator_driver_exit(void)
+ {
+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+ }
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -123,7 +123,7 @@ void dprc_driver_exit(void);
+
+ int __init fsl_mc_allocator_driver_init(void);
+
+-void __exit fsl_mc_allocator_driver_exit(void);
++void fsl_mc_allocator_driver_exit(void);
+
+ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
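To see the hazard being fixed: for built-in code, functions marked __exit can
be discarded at link time, yet an __init error path may still reference them,
which is the mismatch flagged here. A minimal hypothetical reproduction:

    #include <linux/init.h>

    int demo_register(void);
    void demo_unregister(void);

    /* BEFORE: for built-in drivers, __exit code may be discarded entirely */
    void __exit demo_driver_exit(void)
    {
        demo_unregister();
    }

    int __init demo_driver_init(void)
    {
        int error = demo_register();

        if (error)
            demo_driver_exit(); /* references a possibly-discarded section */
        return error;
    }

    /* AFTER: dropping __exit keeps demo_driver_exit() in the normal
     * text section, so the init error path is always safe to run. */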
diff --git a/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch b/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch
new file mode 100644
index 0000000..ee0d1f6
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7157-staging-fsl-mc-Remove-unneeded-else-following-a-retu.patch
@@ -0,0 +1,45 @@
+From 5f82c6ff69f3a4bb635e619a893292bea711421e Mon Sep 17 00:00:00 2001
+From: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Date: Thu, 18 Feb 2016 17:22:50 -0500
+Subject: [PATCH 157/226] staging: fsl-mc: Remove unneeded else following a
+ return
+
+Remove unnecessary else when there is a return statement in the
+corresponding if block. Coccinelle patch used:
+
+@rule1@
+expression e1;
+@@
+
+ if (e1) { ... return ...; }
+- else{
+ ...
+- }
+
+@rule2@
+expression e2;
+statement s1;
+@@
+
+ if(e2) { ... return ...; }
+- else
+ s1
+
+Signed-off-by: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -248,8 +248,7 @@ static bool fsl_mc_is_root_dprc(struct d
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+- else
+- return dev == root_dprc_dev;
++ return dev == root_dprc_dev;
+ }
+
+ static int get_dprc_icid(struct fsl_mc_io *mc_io,
diff --git a/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch b/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch
new file mode 100644
index 0000000..51e8792
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7158-staging-fsl-mc-Drop-unneeded-void-pointer-cast.patch
@@ -0,0 +1,43 @@
+From d9605741556a15dceed105afd7369d644aa46207 Mon Sep 17 00:00:00 2001
+From: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Date: Thu, 25 Feb 2016 14:46:11 -0500
+Subject: [PATCH 158/226] staging: fsl-mc: Drop unneeded void pointer cast
+
+Void pointers need not be cast to other pointer types.
+Semantic patch used:
+
+@r@
+expression x;
+void *e;
+type T;
+identifier f;
+@@
+
+(
+ *((T *)e)
+|
+ ((T *)x) [...]
+|
+ ((T *)x)->f
+|
+- (T *)
+ e
+)
+
+Signed-off-by: Janani Ravichandran <janani.rvchndrn at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -407,7 +407,7 @@ static irqreturn_t dprc_irq0_handler_thr
+ {
+ int error;
+ u32 status;
+- struct device *dev = (struct device *)arg;
++ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
diff --git a/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch b/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch
new file mode 100644
index 0000000..74ac4fd
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7159-staging-fsl-mc-bus-Eliminate-double-function-call.patch
@@ -0,0 +1,73 @@
+From ecd7b5d9616e50f48a400749f17db19fd8a43f25 Mon Sep 17 00:00:00 2001
+From: Bhaktipriya Shridhar <bhaktipriya96 at gmail.com>
+Date: Sun, 28 Feb 2016 23:58:05 +0530
+Subject: [PATCH 159/226] staging: fsl-mc: bus: Eliminate double function call
+
+A call to irq_find_matching_host was already made and the result
+has been stored in mc_msi_domain. mc_msi_domain is then reassigned
+using the same function call, which is redundant.
+
+irq_find_matching_host locates and returns a domain for a given fwnode.
+The domain is identified using a device node and a bus_token (if several
+domains have the same device node but different purposes, they can be
+distinguished using a bus-specific token).
+http://www.bricktou.com/include/linux/irqdomain_irq_find_matching_host_en.html
+
+Also, of_property_read_bool finds and reads a boolean property from
+the device node it is given; it only reads the property value and
+doesn't alter the device node.
+http://lists.infradead.org/pipermail/linux-arm-kernel/2012-February/083698.html
+
+Since, both the function calls have the same device node and bus_token,
+the return values shall be the same. Hence, the second call has been
+removed.
+
+This was done using Coccinelle:
+
+@r@
+idexpression *x;
+identifier f;
+position p1,p2;
+@@
+
+x@p1 = f(...)
+... when != x
+(
+x@p2 = f(...)
+)
+
+@script:python@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+if (p1[0].line == p2[0].line):
+ cocci.include_match(False)
+
+@@
+idexpression *x;
+identifier f;
+position r.p1,r.p2;
+@@
+
+*x@p1 = f(...)
+...
+*x@p2 = f(...)
+
+Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96 at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -118,8 +118,6 @@ void its_fsl_mc_msi_cleanup(void)
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+- mc_msi_domain = irq_find_matching_host(np,
+- DOMAIN_BUS_FSL_MC_MSI);
+ if (mc_msi_domain &&
+ mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
+ irq_domain_remove(mc_msi_domain);
diff --git a/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch b/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch
new file mode 100644
index 0000000..2d9c947
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7160-Staging-fsl-mc-Replace-pr_debug-with-dev_dbg.patch
@@ -0,0 +1,96 @@
+From 8727f71717b449a4c74a5a599374c05822d525f7 Mon Sep 17 00:00:00 2001
+From: Bhumika Goyal <bhumirks at gmail.com>
+Date: Fri, 4 Mar 2016 19:14:52 +0530
+Subject: [PATCH 160/226] Staging: fsl-mc: Replace pr_debug with dev_dbg
+
+This patch replaces pr_debug calls with dev_dbg when the device structure
+is available, as dev_* prints identifying information about the struct
+device.
+Done using coccinelle:
+
+@r exists@
+identifier f, s;
+identifier x;
+position p;
+@@
+f(...,struct s *x,...) {
+<+...
+when != x == NULL
+\(pr_err@p\|pr_debug@p\|pr_info\)(...);
+...+>
+}
+
+@r2@
+identifier fld2;
+identifier r.s;
+@@
+
+struct s {
+ ...
+ struct device *fld2;
+ ...
+};
+
+@@
+identifier r.x,r2.fld2;
+position r.p;
+@@
+
+(
+-pr_err@p
++dev_err
+ (
++ &x->fld2,
+...)
+|
+- pr_debug@p
++ dev_dbg
+ (
++ &x->fld2,
+...)
+|
+- pr_info@p
++ dev_info
+ (
++ &x->fld2,
+...)
+)
+
+Signed-off-by: Bhumika Goyal <bhumirks at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-sys.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ b/drivers/staging/fsl-mc/bus/mc-sys.c
+@@ -328,7 +328,8 @@ static int mc_polling_wait_preemptible(s
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ dev_dbg(&mc_io->dev,
++ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)
+ MC_CMD_HDR_READ_TOKEN(cmd->header),
+@@ -369,7 +370,8 @@ static int mc_polling_wait_atomic(struct
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ dev_dbg(&mc_io->dev,
++ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)
+ MC_CMD_HDR_READ_TOKEN(cmd->header),
+@@ -424,7 +426,8 @@ int mc_send_command(struct fsl_mc_io *mc
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+- pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
++ dev_dbg(&mc_io->dev,
++ "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
+ (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
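The practical difference is in the log prefix: dev_dbg() prepends the driver
and device names, so messages from multiple MC portals stay attributable,
whereas pr_debug() emits only the bare format string. A small illustration;
the output shown in the comments is approximate:

    #include <linux/device.h>

    static void demo_log_timeout(struct device *dev)
    {
        pr_debug("MC command timed out\n");     /* "MC command timed out" */

        /* roughly: "fsl_mc_bus <devname>: MC command timed out" */
        dev_dbg(dev, "MC command timed out\n");
    }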
diff --git a/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch b/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch
new file mode 100644
index 0000000..3da41c1
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7161-Staging-fsl-mc-Replace-pr_err-with-dev_err.patch
@@ -0,0 +1,83 @@
+From 79b4625a6ab72251e00aa94ee22a6bfe32dbeeda Mon Sep 17 00:00:00 2001
+From: Bhumika Goyal <bhumirks at gmail.com>
+Date: Fri, 4 Mar 2016 19:15:55 +0530
+Subject: [PATCH 161/226] Staging: fsl-mc: Replace pr_err with dev_err
+
+This patch replaces pr_err calls with dev_err when the device structure
+is available, as dev_* prints identifying information about the struct device.
+Done using coccinelle:
+
+@r exists@
+identifier f, s;
+identifier x;
+position p;
+@@
+f(...,struct s *x,...) {
+<+...
+when != x == NULL
+\(pr_err@p\|pr_debug@p\|pr_info\)(...);
+...+>
+}
+
+@r2@
+identifier fld2;
+identifier r.s;
+@@
+
+struct s {
+ ...
+ struct device *fld2;
+ ...
+};
+
+@@
+identifier r.x,r2.fld2;
+position r.p;
+@@
+
+(
+-pr_err@p
++dev_err
+ (
++ &x->fld2,
+...)
+|
+- pr_debug@p
++ dev_dbg
+ (
++ &x->fld2,
+...)
+|
+- pr_info@p
++ dev_info
+ (
++ &x->fld2,
+...)
+)
+
+Signed-off-by: Bhumika Goyal <bhumirks at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -260,14 +260,15 @@ static int get_dprc_icid(struct fsl_mc_i
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+- pr_err("dprc_open() failed: %d\n", error);
++ dev_err(&mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
+ if (error < 0) {
+- pr_err("dprc_get_attributes() failed: %d\n", error);
++ dev_err(&mc_io->dev, "dprc_get_attributes() failed: %d\n",
++ error);
+ goto common_cleanup;
+ }
+
diff --git a/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch b/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch
new file mode 100644
index 0000000..4a32602
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7162-staging-fsl-mc-fix-incorrect-type-passed-to-dev_dbg-.patch
@@ -0,0 +1,48 @@
+From 83e0f572a74bceeb3736b19b929c91d12d1d6d80 Mon Sep 17 00:00:00 2001
+From: Cihangir Akturk <cakturk at gmail.com>
+Date: Mon, 14 Mar 2016 18:14:06 +0200
+Subject: [PATCH 162/226] staging: fsl-mc: fix incorrect type passed to
+ dev_dbg macros
+
+The dev_dbg macros expect a const struct device * as their device
+argument, but here the argument we are passing is of type
+struct device **. This patch fixes this error.
+
+Fixes: de71daf5c839 ("Staging: fsl-mc: Replace pr_debug with dev_dbg")
+Cc: Bhumika Goyal <bhumirks at gmail.com>
+Reported-by: Guenter Roeck <linux at roeck-us.net>
+Signed-off-by: Cihangir Akturk <cakturk at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-sys.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ b/drivers/staging/fsl-mc/bus/mc-sys.c
+@@ -328,7 +328,7 @@ static int mc_polling_wait_preemptible(s
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+- dev_dbg(&mc_io->dev,
++ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)
+@@ -370,7 +370,7 @@ static int mc_polling_wait_atomic(struct
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+- dev_dbg(&mc_io->dev,
++ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)
+@@ -426,7 +426,7 @@ int mc_send_command(struct fsl_mc_io *mc
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+- dev_dbg(&mc_io->dev,
++ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+ mc_io->portal_phys_addr,
+ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
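The root cause, which this patch and the next both address, is that
struct fsl_mc_io stores a pointer to its device, so taking the field's
address yields a pointer-to-pointer. A trimmed sketch of the distinction:

    #include <linux/device.h>

    struct fsl_mc_io {
        struct device *dev; /* a pointer, not an embedded struct device */
        /* ... other fields trimmed ... */
    };

    static void demo(struct fsl_mc_io *mc_io)
    {
        dev_dbg(mc_io->dev, "ok\n");  /* struct device *  : correct */
        /* dev_dbg(&mc_io->dev, ...);    struct device ** : wrong   */
    }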
diff --git a/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch b/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch
new file mode 100644
index 0000000..024e10f
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7163-staging-fsl-mc-fix-incorrect-type-passed-to-dev_err-.patch
@@ -0,0 +1,38 @@
+From 79929c151efbc047a8a82f9cafcb9238465caa86 Mon Sep 17 00:00:00 2001
+From: Cihangir Akturk <cakturk at gmail.com>
+Date: Mon, 14 Mar 2016 18:14:07 +0200
+Subject: [PATCH 163/226] staging: fsl-mc: fix incorrect type passed to
+ dev_err macros
+
+The dev_err macros expect a const struct device * as their device
+argument, but here the argument we are passing is of type
+struct device **. This patch fixes this error.
+
+Fixes: 454b0ec8bf99 ("Staging: fsl-mc: Replace pr_err with dev_err")
+Cc: Bhumika Goyal <bhumirks at gmail.com>
+Reported-by: Guenter Roeck <linux at roeck-us.net>
+Signed-off-by: Cihangir Akturk <cakturk at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -260,14 +260,14 @@ static int get_dprc_icid(struct fsl_mc_i
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+- dev_err(&mc_io->dev, "dprc_open() failed: %d\n", error);
++ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
+ if (error < 0) {
+- dev_err(&mc_io->dev, "dprc_get_attributes() failed: %d\n",
++ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
diff --git a/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch b/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch
new file mode 100644
index 0000000..d16485f
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7164-staging-fsl-mc-get-rid-of-mutex_locked-variables.patch
@@ -0,0 +1,207 @@
+From d36a6b361a3a181559daebcf32e11ab18431a854 Mon Sep 17 00:00:00 2001
+From: Cihangir Akturk <cakturk at gmail.com>
+Date: Sat, 9 Apr 2016 21:45:18 +0300
+Subject: [PATCH 164/226] staging: fsl-mc: get rid of mutex_locked variables
+
+Remove the mutex_locked variables that were used to track whether the
+mutex is locked; instead, add another label to unlock the mutex on
+premature exits due to an error.
+
+This patch also addresses the following warnings reported by coccinelle:
+
+drivers/staging/fsl-mc/bus/mc-allocator.c:237:1-7: preceding lock on line 204
+drivers/staging/fsl-mc/bus/mc-allocator.c:89:1-7: preceding lock on line 57
+drivers/staging/fsl-mc/bus/mc-allocator.c:157:1-7: preceding lock on line 124
+
+Signed-off-by: Cihangir Akturk <cakturk at gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 61 ++++++++++++-----------------
+ 1 file changed, 24 insertions(+), 37 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+- bool mutex_locked = false;
+
+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+ goto out;
+@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+- mutex_locked = true;
+
+ if (WARN_ON(res_pool->max_count < 0))
+- goto out;
++ goto out_unlock;
+ if (WARN_ON(res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count))
+- goto out;
++ goto out_unlock;
+
+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+ GFP_KERNEL);
+@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_
+ error = -ENOMEM;
+ dev_err(&mc_bus_dev->dev,
+ "Failed to allocate memory for fsl_mc_resource\n");
+- goto out;
++ goto out_unlock;
+ }
+
+ resource->type = pool_type;
+@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_
+ res_pool->free_count++;
+ res_pool->max_count++;
+ error = 0;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
+ out:
+- if (mutex_locked)
+- mutex_unlock(&res_pool->mutex);
+-
+ return error;
+ }
+
+@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ int error = -EINVAL;
+- bool mutex_locked = false;
+
+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+ goto out;
+@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+- mutex_locked = true;
+
+ if (WARN_ON(res_pool->max_count <= 0))
+- goto out;
++ goto out_unlock;
+ if (WARN_ON(res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count))
+- goto out;
++ goto out_unlock;
+
+ /*
+ * If the device is currently allocated, its resource is not
+@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_
+ dev_err(&mc_bus_dev->dev,
+ "Device %s cannot be removed from resource pool\n",
+ dev_name(&mc_dev->dev));
+- goto out;
++ goto out_unlock;
+ }
+
+ list_del(&resource->node);
+@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_
+ devm_kfree(&mc_bus_dev->dev, resource);
+ mc_dev->resource = NULL;
+ error = 0;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
+ out:
+- if (mutex_locked)
+- mutex_unlock(&res_pool->mutex);
+-
+ return error;
+ }
+
+@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocat
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+- bool mutex_locked = false;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+ FSL_MC_NUM_POOL_TYPES);
+
+ *new_resource = NULL;
+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+- goto error;
++ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (WARN_ON(res_pool->mc_bus != mc_bus))
+- goto error;
++ goto out;
+
+ mutex_lock(&res_pool->mutex);
+- mutex_locked = true;
+ resource = list_first_entry_or_null(&res_pool->free_list,
+ struct fsl_mc_resource, node);
+
+@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocat
+ dev_err(&mc_bus_dev->dev,
+ "No more resources of type %s left\n",
+ fsl_mc_pool_type_strings[pool_type]);
+- goto error;
++ goto out_unlock;
+ }
+
+ if (WARN_ON(resource->type != pool_type))
+- goto error;
++ goto out_unlock;
+ if (WARN_ON(resource->parent_pool != res_pool))
+- goto error;
++ goto out_unlock;
+ if (WARN_ON(res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count))
+- goto error;
++ goto out_unlock;
+
+ list_del(&resource->node);
+ INIT_LIST_HEAD(&resource->node);
+
+ res_pool->free_count--;
++ error = 0;
++out_unlock:
+ mutex_unlock(&res_pool->mutex);
+ *new_resource = resource;
+- return 0;
+-error:
+- if (mutex_locked)
+- mutex_unlock(&res_pool->mutex);
+-
++out:
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_alloca
+ void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+ {
+ struct fsl_mc_resource_pool *res_pool;
+- bool mutex_locked = false;
+
+ res_pool = resource->parent_pool;
+ if (WARN_ON(resource->type != res_pool->type))
+- goto out;
++ return;
+
+ mutex_lock(&res_pool->mutex);
+- mutex_locked = true;
+ if (WARN_ON(res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count))
+- goto out;
++ goto out_unlock;
+
+ if (WARN_ON(!list_empty(&resource->node)))
+- goto out;
++ goto out_unlock;
+
+ list_add_tail(&resource->node, &res_pool->free_list);
+ res_pool->free_count++;
+-out:
+- if (mutex_locked)
+- mutex_unlock(&res_pool->mutex);
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
+ }
+ EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
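The transformation is the common single-unlock-label idiom: instead of
tracking lock state in a boolean, exits taken before the lock jump to one
label and exits taken after it jump to another label that unlocks first.
A generic sketch, demo_* names hypothetical:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct demo_pool {
        struct mutex mutex;
        int free_count;
    };

    static int demo_take(struct demo_pool *pool)
    {
        int error = -EINVAL;

        if (!pool)
            goto out;           /* not locked yet: plain exit */

        mutex_lock(&pool->mutex);
        if (pool->free_count <= 0)
            goto out_unlock;    /* locked: unlock on the way out */

        pool->free_count--;
        error = 0;

    out_unlock:
        mutex_unlock(&pool->mutex);
    out:
        return error;
    }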
diff --git a/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch b/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch
new file mode 100644
index 0000000..858c6e2
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7165-staging-fsl-mc-TODO-updates.patch
@@ -0,0 +1,49 @@
+From 7b3bffea6d36f396faf1814088f03a6b8efe1ccb Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:48:37 -0500
+Subject: [PATCH 165/226] staging: fsl-mc: TODO updates
+
+remove 3 of the remaining TODO items:
+
+ -multiple root fsl-mc buses-- done in patch series starting with
+ commit 14f928054a05 ("staging: fsl-mc: abstract test for existence
+ of fsl-mc bus")
+
+ -interrupt support-- done in patch series starting with
+ commit 9b1b282ccd81 ("irqdomain: Added domain bus token
+ DOMAIN_BUS_FSL_MC_MSI")
+
+ -MC command serialization-- done in commit 63f2be5c3b358 ("staging:
+ fsl-mc: Added serialization to mc_send_command()")
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/TODO | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/drivers/staging/fsl-mc/TODO
++++ b/drivers/staging/fsl-mc/TODO
+@@ -1,21 +1,8 @@
+-* Decide if multiple root fsl-mc buses will be supported per Linux instance,
+- and if so add support for this.
+-
+ * Add at least one device driver for a DPAA2 object (child device of the
+ fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
+ driver support, which depends on drivers for several objects: DPNI,
+ DPIO, DPMAC. Other pre-requisites include:
+
+- * interrupt support. for meaningful driver support we need
+- interrupts, and thus need message interrupt support by the bus
+- driver.
+- -Note: this has dependencies on generic MSI support work
+- in process upstream, see [1] and [2].
+-
+- * Management Complex (MC) command serialization. locking mechanisms
+- are needed by drivers to serialize commands sent to the MC, including
+- from atomic context.
+-
+ * MC firmware uprev. The MC firmware upon which the fsl-mc
+ bus driver and DPAA2 object drivers are based is continuing
+ to evolve, so minor updates are needed to keep in sync with binary
diff --git a/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch b/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch
new file mode 100644
index 0000000..f63574d
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7166-staging-fsl-mc-DPAA2-overview-readme-update.patch
@@ -0,0 +1,279 @@
+From 720bf9c9a6fdff63ecc4b382a5092c0020fb7b42 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:48:42 -0500
+Subject: [PATCH 166/226] staging: fsl-mc: DPAA2 overview readme update
+
+incorporated feedback from review comments, other misc cleanup/tweaks
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/README.txt | 138 +++++++++++++++++++++----------------
+ 1 file changed, 80 insertions(+), 58 deletions(-)
+
+--- a/drivers/staging/fsl-mc/README.txt
++++ b/drivers/staging/fsl-mc/README.txt
+@@ -11,11 +11,11 @@ Contents summary
+ -Overview of DPAA2 objects
+ -DPAA2 Linux driver architecture overview
+ -bus driver
+- -dprc driver
++ -DPRC driver
+ -allocator
+- -dpio driver
++ -DPIO driver
+ -Ethernet
+- -mac
++ -MAC
+
+ DPAA2 Overview
+ --------------
+@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator
+ The MC provides memory-mapped I/O command interfaces (MC portals)
+ which DPAA2 software drivers use to operate on DPAA2 objects:
+
++The diagram below shows an overview of the DPAA2 resource management
++architecture:
++
+ +--------------------------------------+
+ | OS |
+ | DPAA2 drivers |
+@@ -77,13 +80,13 @@ DPIO objects.
+
+ Overview of DPAA2 Objects
+ -------------------------
+-The section provides a brief overview of some key objects
+-in the DPAA2 hardware. A simple scenario is described illustrating
+-the objects involved in creating a network interfaces.
++The section provides a brief overview of some key DPAA2 objects.
++A simple scenario is described illustrating the objects involved
++ in creating a network interface.
+
+ -DPRC (Datapath Resource Container)
+
+- A DPRC is an container object that holds all the other
++ A DPRC is a container object that holds all the other
+ types of DPAA2 objects. In the example diagram below there
+ are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
+ in the container.
+@@ -101,23 +104,23 @@ the objects involved in creating a netwo
+ | |
+ +---------------------------------------------------------+
+
+- From the point of view of an OS, a DPRC is bus-like. Like
+- a plug-and-play bus, such as PCI, DPRC commands can be used to
+- enumerate the contents of the DPRC, discover the hardware
+- objects present (including mappable regions and interrupts).
++ From the point of view of an OS, a DPRC behaves similar to a plug and
++ play bus, like PCI. DPRC commands can be used to enumerate the contents
++ of the DPRC, discover the hardware objects present (including mappable
++ regions and interrupts).
+
+- dprc.1 (bus)
++ DPRC.1 (bus)
+ |
+ +--+--------+-------+-------+-------+
+ | | | | |
+- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
+- dpmcp.2 dpio.2
+- dpmcp.3
++ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
++ DPMCP.2 DPIO.2
++ DPMCP.3
+
+ Hardware objects can be created and destroyed dynamically, providing
+ the ability to hot plug/unplug objects in and out of the DPRC.
+
+- A DPRC has a mappable mmio region (an MC portal) that can be used
++ A DPRC has a mappable MMIO region (an MC portal) that can be used
+ to send MC commands. It has an interrupt for status events (like
+ hotplug).
+
+@@ -137,10 +140,11 @@ the objects involved in creating a netwo
+ A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
+ queuing mechanisms, configuration mechanisms, buffer management,
+ physical ports, and interrupts. DPAA2 uses a more granular approach
+- utilizing multiple hardware objects. Each object has specialized
+- functions, and are used together by software to provide Ethernet network
+- interface functionality. This approach provides efficient use of finite
+- hardware resources, flexibility, and performance advantages.
++ utilizing multiple hardware objects. Each object provides specialized
++ functions. Groups of these objects are used by software to provide
++ Ethernet network interface functionality. This approach provides
++ efficient use of finite hardware resources, flexibility, and
++ performance advantages.
+
+ The diagram below shows the objects needed for a simple
+ network interface configuration on a system with 2 CPUs.
+@@ -168,46 +172,52 @@ the objects involved in creating a netwo
+
+ Below the objects are described. For each object a brief description
+ is provided along with a summary of the kinds of operations the object
+- supports and a summary of key resources of the object (mmio regions
+- and irqs).
++ supports and a summary of key resources of the object (MMIO regions
++ and IRQs).
+
+ -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
+ hardware device that connects to an Ethernet PHY and allows
+ physical transmission and reception of Ethernet frames.
+- -mmio regions: none
+- -irqs: dpni link change
++ -MMIO regions: none
++ -IRQs: DPNI link change
+ -commands: set link up/down, link config, get stats,
+- irq config, enable, reset
++ IRQ config, enable, reset
+
+ -DPNI (Datapath Network Interface): contains TX/RX queues,
+- network interface configuration, and rx buffer pool configuration
+- mechanisms.
+- -mmio regions: none
+- -irqs: link state
++ network interface configuration, and RX buffer pool configuration
++ mechanisms. The TX/RX queues are in memory and are identified by
++ queue number.
++ -MMIO regions: none
++ -IRQs: link state
+ -commands: port config, offload config, queue config,
+- parse/classify config, irq config, enable, reset
++ parse/classify config, IRQ config, enable, reset
+
+ -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
+- packets and do hardware buffer pool management operations. For
+- optimum performance there is typically DPIO per CPU. This allows
+- each CPU to perform simultaneous enqueue/dequeue operations.
+- -mmio regions: queue operations, buffer mgmt
+- -irqs: data availability, congestion notification, buffer
++ packets and do hardware buffer pool management operations. The DPAA2
++ architecture separates the mechanism to access queues (the DPIO object)
++ from the queues themselves. The DPIO provides an MMIO interface to
++ enqueue/dequeue packets. To enqueue something a descriptor is written
++ to the DPIO MMIO region, which includes the target queue number.
++ There will typically be one DPIO assigned to each CPU. This allows all
++ CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
++ expected to be shared by different DPAA2 drivers.
++ -MMIO regions: queue operations, buffer management
++ -IRQs: data availability, congestion notification, buffer
+ pool depletion
+- -commands: irq config, enable, reset
++ -commands: IRQ config, enable, reset
+
+ -DPBP (Datapath Buffer Pool): represents a hardware buffer
+ pool.
+- -mmio regions: none
+- -irqs: none
++ -MMIO regions: none
++ -IRQs: none
+ -commands: enable, reset
+
+ -DPMCP (Datapath MC Portal): provides an MC command portal.
+ Used by drivers to send commands to the MC to manage
+ objects.
+- -mmio regions: MC command portal
+- -irqs: command completion
+- -commands: irq config, enable, reset
++ -MMIO regions: MC command portal
++ -IRQs: command completion
++ -commands: IRQ config, enable, reset
+
+ Object Connections
+ ------------------
+@@ -268,22 +278,22 @@ of each driver follows.
+ | Stack |
+ +------------+ +------------+
+ | Allocator |. . . . . . . | Ethernet |
+- |(dpmcp,dpbp)| | (dpni) |
++ |(DPMCP,DPBP)| | (DPNI) |
+ +-.----------+ +---+---+----+
+ . . ^ |
+ . . <data avail, | |<enqueue,
+ . . tx confirm> | | dequeue>
+ +-------------+ . | |
+ | DPRC driver | . +---+---V----+ +---------+
+- | (dprc) | . . . . . .| DPIO driver| | MAC |
+- +----------+--+ | (dpio) | | (dpmac) |
++ | (DPRC) | . . . . . .| DPIO driver| | MAC |
++ +----------+--+ | (DPIO) | | (DPMAC) |
+ | +------+-----+ +-----+---+
+ |<dev add/remove> | |
+ | | |
+ +----+--------------+ | +--+---+
+- | mc-bus driver | | | PHY |
++ | MC-bus driver | | | PHY |
+ | | | |driver|
+- | /fsl-mc at 80c000000 | | +--+---+
++ | /soc/fsl-mc | | +--+---+
+ +-------------------+ | |
+ | |
+ ================================ HARDWARE =========|=================|======
+@@ -298,25 +308,27 @@ of each driver follows.
+
+ A brief description of each driver is provided below.
+
+- mc-bus driver
++ MC-bus driver
+ -------------
+- The mc-bus driver is a platform driver and is probed from an
+- "/fsl-mc at xxxx" node in the device tree passed in by boot firmware.
+- It is responsible for bootstrapping the DPAA2 kernel infrastructure.
++ The MC-bus driver is a platform driver and is probed from a
++ node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
++ firmware. It is responsible for bootstrapping the DPAA2 kernel
++ infrastructure.
+ Key functions include:
+ -registering a new bus type named "fsl-mc" with the kernel,
+ and implementing bus call-backs (e.g. match/uevent/dev_groups)
+- -implemeting APIs for DPAA2 driver registration and for device
++ -implementing APIs for DPAA2 driver registration and for device
+ add/remove
+- -creates an MSI irq domain
+- -do a device add of the 'root' DPRC device, which is needed
+- to bootstrap things
++ -creates an MSI IRQ domain
++ -doing a 'device add' to expose the 'root' DPRC, in turn triggering
++ a bind of the root DPRC to the DPRC driver
+
+ DPRC driver
+ -----------
+- The dprc-driver is bound DPRC objects and does runtime management
++ The DPRC driver is bound to DPRC objects and does runtime management
+ of a bus instance. It performs the initial bus scan of the DPRC
+- and handles interrupts for container events such as hot plug.
++ and handles interrupts for container events such as hot plug by
++ re-scanning the DPRC.
+
+ Allocator
+ ----------
+@@ -334,14 +346,20 @@ A brief description of each driver is pr
+ DPIO driver
+ -----------
+ The DPIO driver is bound to DPIO objects and provides services that allow
+- other drivers such as the Ethernet driver to receive and transmit data.
++ other drivers such as the Ethernet driver to enqueue and dequeue data for
++ their respective objects.
+ Key services include:
+ -data availability notifications
+ -hardware queuing operations (enqueue and dequeue of data)
+ -hardware buffer pool management
+
++ To transmit a packet the Ethernet driver puts data on a queue and
++ invokes a DPIO API. For receive, the Ethernet driver registers
++ a data availability notification callback. To dequeue a packet
++ a DPIO API is used.
++
+ There is typically one DPIO object per physical CPU for optimum
+- performance, allowing each CPU to simultaneously enqueue
++ performance, allowing different CPUs to simultaneously enqueue
+ and dequeue data.
+
+ The DPIO driver operates on behalf of all DPAA2 drivers
+@@ -362,3 +380,7 @@ A brief description of each driver is pr
+ by the appropriate PHY driver via an mdio bus. The MAC driver
+ plays a role of being a proxy between the PHY driver and the
+ MC. It does this proxy via the MC commands to a DPMAC object.
++ If the PHY driver signals a link change, the MAC driver notifies
++ the MC via a DPMAC command. If a network interface is brought
++ up or down, the MC notifies the DPMAC driver via an interrupt and
++ the driver can take appropriate action.
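For readers following the README, here is a hedged sketch of the driver-side
view it describes: a module binding to one DPAA2 object type through the
fsl-mc bus. The "sample" names are illustrative, the header path assumes the
4.4 staging layout, and the match table reflects this series after the
version fields are dropped (patch 170) and the id struct is renamed
(patch 181):

    #include <linux/module.h>
    #include "../include/mc.h"    /* staging fsl-mc bus API, path assumed */

    static int sample_probe(struct fsl_mc_device *mc_dev)
    {
        dev_info(&mc_dev->dev, "bound to %s.%d\n",
                 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
        return 0;
    }

    static int sample_remove(struct fsl_mc_device *mc_dev)
    {
        return 0;
    }

    static const struct fsl_mc_device_id sample_match_id_table[] = {
        {
            .vendor = FSL_MC_VENDOR_FREESCALE,
            .obj_type = "dpbp",
        },
        { .vendor = 0x0 }
    };

    static struct fsl_mc_driver sample_driver = {
        .driver = {
            .name = "sample",
        },
        .match_id_table = sample_match_id_table,
        .probe = sample_probe,
        .remove = sample_remove,
    };

    static int __init sample_init(void)
    {
        return fsl_mc_driver_register(&sample_driver);
    }
    module_init(sample_init);

    static void __exit sample_exit(void)
    {
        fsl_mc_driver_unregister(&sample_driver);
    }
    module_exit(sample_exit);

    MODULE_LICENSE("GPL");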
diff --git a/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch b/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch
new file mode 100644
index 0000000..b759606
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7167-staging-fsl-mc-update-dpmcp-binary-interface-to-v3.0.patch
@@ -0,0 +1,123 @@
+From fa245614c92ffbdaec6a56552032432b5343b1dc Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:48:48 -0500
+Subject: [PATCH 167/226] staging: fsl-mc: update dpmcp binary interface to
+ v3.0
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 5 ++---
+ drivers/staging/fsl-mc/bus/dpmcp.c | 35 ++------------------------------
+ drivers/staging/fsl-mc/bus/dpmcp.h | 10 ++-------
+ 3 files changed, 6 insertions(+), 44 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+@@ -33,8 +33,8 @@
+ #define _FSL_DPMCP_CMD_H
+
+ /* DPMCP Version */
+-#define DPMCP_VER_MAJOR 2
+-#define DPMCP_VER_MINOR 1
++#define DPMCP_VER_MAJOR 3
++#define DPMCP_VER_MINOR 0
+
+ /* Command IDs */
+ #define DPMCP_CMDID_CLOSE 0x800
+@@ -52,6 +52,5 @@
+ #define DPMCP_CMDID_SET_IRQ_MASK 0x014
+ #define DPMCP_CMDID_GET_IRQ_MASK 0x015
+ #define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+-#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
+
+ #endif /* _FSL_DPMCP_CMD_H */
+--- a/drivers/staging/fsl-mc/bus/dpmcp.c
++++ b/drivers/staging/fsl-mc/bus/dpmcp.c
+@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_i
+ cmd.params[0] |= mc_enc(0, 8, irq_index);
+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
+- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_i
+ /* retrieve response parameters */
+ irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
+ irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
+- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
+ *type = (int)mc_dec(cmd.params[2], 32, 32);
+ return 0;
+ }
+@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_i
+ }
+
+ /**
+- * dpmcp_clear_irq_status() - Clear a pending interrupt's status
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @status: Bits to clear (W1C) - one bit per cause:
+- * 0 = don't change
+- * 1 = clear status bit
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
+- cmd_flags, token);
+- cmd.params[0] |= mc_enc(0, 32, status);
+- cmd.params[0] |= mc_enc(32, 8, irq_index);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+ * dpmcp_get_attributes - Retrieve DPMCP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+--- a/drivers/staging/fsl-mc/bus/dpmcp.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp.h
+@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
+ * struct dpmcp_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+- * @user_irq_id: A user defined number associated with this IRQ
++ * @irq_num: A user defined number associated with this IRQ
+ */
+ struct dpmcp_irq_cfg {
+ uint64_t paddr;
+ uint32_t val;
+- int user_irq_id;
++ int irq_num;
+ };
+
+ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_i
+ uint8_t irq_index,
+ uint32_t *status);
+
+-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t status);
+-
+ /**
+ * struct dpmcp_attr - Structure representing DPMCP attributes
+ * @id: DPMCP object ID
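The interface update above is mostly mechanical renaming, but it is a good
place to note the marshalling convention used throughout these files: each
command carries an array of 64-bit params words, and fields are packed in and
out with mc_enc()/mc_dec() at a bit offset and width. A hedged
reimplementation for illustration only (the staging tree ships its own
definitions):

    #include <linux/types.h>

    /* mask of the low 'width' bits, valid for width 1..64 */
    #define MAKE_UMASK64(width) \
        ((u64)((width) < 64 ? ((u64)1 << (width)) - 1 : (u64)-1))

    static inline u64 mc_enc(int lsoffset, int width, u64 val)
    {
        return (val & MAKE_UMASK64(width)) << lsoffset;
    }

    static inline u64 mc_dec(u64 val, int lsoffset, int width)
    {
        return (val >> lsoffset) & MAKE_UMASK64(width);
    }

So cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num) places irq_num in bits
31:0 of the third parameter word, and the matching
mc_dec(cmd.params[2], 0, 32) recovers it from the response.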
diff --git a/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch b/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch
new file mode 100644
index 0000000..474baf4
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7168-staging-fsl-mc-update-dpbp-binary-interface-to-v2.2.patch
@@ -0,0 +1,208 @@
+From de0fa9842d52e4e80576d378f32aa9ca76a4270b Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:48:54 -0500
+Subject: [PATCH 168/226] staging: fsl-mc: update dpbp binary interface to
+ v2.2
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dpbp.c | 77 ++++++++++++++++++++++++++++-
+ drivers/staging/fsl-mc/include/dpbp-cmd.h | 4 +-
+ drivers/staging/fsl-mc/include/dpbp.h | 51 ++++++++++++++++++-
+ 3 files changed, 127 insertions(+), 5 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dpbp.c
++++ b/drivers/staging/fsl-mc/bus/dpbp.c
+@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io
+ cmd.params[0] |= mc_enc(0, 8, irq_index);
+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
+- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io
+ /* retrieve response parameters */
+ irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
+ irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
+- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
+ *type = (int)mc_dec(cmd.params[2], 32, 32);
+ return 0;
+ }
+@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
++ cmd.params[0] |= mc_enc(0, 32, *status);
+ cmd.params[0] |= mc_enc(32, 8, irq_index);
+
+ /* send command to mc*/
+@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io
+ return 0;
+ }
+ EXPORT_SYMBOL(dpbp_get_attributes);
++
++/**
++ * dpbp_set_notifications() - Set notifications towards software
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @cfg: notifications configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_set_notifications(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
++ cmd_flags,
++ token);
++
++ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
++ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
++ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
++ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
++ cmd.params[2] |= mc_enc(0, 16, cfg->options);
++ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
++ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpbp_get_notifications() - Get the notifications configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @cfg: notifications configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_notifications(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
++ cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
++ cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
++ cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
++ cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
++ cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
++ cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
++
++ return 0;
++}
+--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
++++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
+@@ -34,7 +34,7 @@
+
+ /* DPBP Version */
+ #define DPBP_VER_MAJOR 2
+-#define DPBP_VER_MINOR 1
++#define DPBP_VER_MINOR 2
+
+ /* Command IDs */
+ #define DPBP_CMDID_CLOSE 0x800
+@@ -57,4 +57,6 @@
+ #define DPBP_CMDID_GET_IRQ_STATUS 0x016
+ #define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+
++#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
++#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+ #endif /* _FSL_DPBP_CMD_H */
+--- a/drivers/staging/fsl-mc/include/dpbp.h
++++ b/drivers/staging/fsl-mc/include/dpbp.h
+@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
+ * struct dpbp_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+- * @user_irq_id: A user defined number associated with this IRQ
++ * @irq_num: A user defined number associated with this IRQ
+ */
+ struct dpbp_irq_cfg {
+ u64 addr;
+ u32 val;
+- int user_irq_id;
++ int irq_num;
+ };
+
+ int dpbp_set_irq(struct fsl_mc_io *mc_io,
+@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io
+ u16 token,
+ struct dpbp_attr *attr);
+
++/**
++ * DPBP notifications options
++ */
++
++/**
++ * BPSCN write will attempt to allocate into a cache (coherent write)
++ */
++#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
++
++/**
++ * struct dpbp_notification_cfg - Structure representing DPBP notifications
++ * towards software
++ * @depletion_entry: below this threshold the pool is "depleted";
++ * set it to '0' to disable it
++ * @depletion_exit: greater than or equal to this threshold the pool exits its
++ * "depleted" state
++ * @surplus_entry: above this threshold the pool is in "surplus" state;
++ * set it to '0' to disable it
++ * @surplus_exit: less than or equal to this threshold the pool exits its
++ * "surplus" state
++ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
++ * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned.
++ * @message_ctx: The context that will be part of the BPSCN message and will
++ * be written to 'message_iova'
++ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
++ */
++struct dpbp_notification_cfg {
++ u32 depletion_entry;
++ u32 depletion_exit;
++ u32 surplus_entry;
++ u32 surplus_exit;
++ u64 message_iova;
++ u64 message_ctx;
++ u16 options;
++};
++
++int dpbp_set_notifications(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_notification_cfg *cfg);
++
++int dpbp_get_notifications(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_notification_cfg *cfg);
++
+ /** @} */
+
+ #endif /* __FSL_DPBP_H */
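A hedged usage sketch for the new v2.2 notification API: the function and
struct are as added above, while the token/IOVA plumbing and the threshold
values are illustrative assumptions (per the kernel-doc, the BPSCN message
buffer must live in DMA-able memory and be 16-byte aligned):

    #include "../include/dpbp.h"    /* staging header path assumed */

    static int setup_depletion_notifications(struct fsl_mc_io *mc_io,
                                             u16 dpbp_token, u64 msg_iova)
    {
        struct dpbp_notification_cfg cfg = {
            .depletion_entry = 64,    /* "depleted" below 64 buffers */
            .depletion_exit = 128,    /* leaves "depleted" at >= 128 */
            .surplus_entry = 0,       /* 0 disables surplus tracking */
            .surplus_exit = 0,
            .message_iova = msg_iova, /* 16B-aligned, DMA-able */
            .message_ctx = 0x1234,    /* echoed back in the BPSCN message */
            .options = DPBP_NOTIF_OPT_COHERENT_WRITE,
        };

        return dpbp_set_notifications(mc_io, 0, dpbp_token, &cfg);
    }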
diff --git a/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch b/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch
new file mode 100644
index 0000000..4db2998
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7169-staging-fsl-mc-update-dprc-binary-interface-to-v5.1.patch
@@ -0,0 +1,206 @@
+From 45dce4cd82ddc618ade56747620a2a29f7d9a99d Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:48:59 -0500
+Subject: [PATCH 169/226] staging: fsl-mc: update dprc binary interface to
+ v5.1
+
+The meaning of the "status" parameter in dprc_get_irq_status
+has changed, and this patch updates the flib and caller
+of the API.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 4 ++--
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 1 +
+ drivers/staging/fsl-mc/bus/dprc.c | 26 +++++++++++++-------------
+ drivers/staging/fsl-mc/bus/mc-msi.c | 2 +-
+ drivers/staging/fsl-mc/include/dprc.h | 19 ++++++++++++-------
+ 5 files changed, 29 insertions(+), 23 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+@@ -41,8 +41,8 @@
+ #define _FSL_DPRC_CMD_H
+
+ /* DPRC Version */
+-#define DPRC_VER_MAJOR 4
+-#define DPRC_VER_MINOR 0
++#define DPRC_VER_MAJOR 5
++#define DPRC_VER_MINOR 1
+
+ /* Command IDs */
+ #define DPRC_CMDID_CLOSE 0x800
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -423,6 +423,7 @@ static irqreturn_t dprc_irq0_handler_thr
+ if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
+ goto out;
+
++ status = 0;
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+--- a/drivers/staging/fsl-mc/bus/dprc.c
++++ b/drivers/staging/fsl-mc/bus/dprc.c
+@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io
+ /* retrieve response parameters */
+ irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
+ irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
+- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32);
++ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
+ *type = mc_dec(cmd.params[2], 32, 32);
+
+ return 0;
+@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io
+ cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
+- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
++ cmd.params[0] |= mc_enc(0, 32, *status);
+ cmd.params[0] |= mc_enc(32, 8, irq_index);
+
+ /* send command to mc*/
+@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io
+ obj_desc->state = mc_dec(cmd.params[1], 32, 32);
+ obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
+ obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
+ obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
+ obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
+ obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
+@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io *
+ obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
+ obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
+ obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
+ obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
+ obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
+ obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
+@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *m
+ cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
+- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd.params[2] |= mc_enc(32, 32, obj_id);
+ cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
+@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *m
+ /* retrieve response parameters */
+ irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
+ irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
+- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
+ *type = (int)mc_dec(cmd.params[2], 32, 32);
+
+ return 0;
+@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label);
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Endpoint 2 configuration parameters
+ * @cfg: Connection configuration. The connection configuration is ignored for
+- * connections made to DPMAC objects, where rate is set according to
+- * MAC configuration.
+- * The committed rate is the guaranteed rate for the connection.
+- * The maximum rate is an upper limit allowed for the connection; it is
+- * expected to be equal or higher than the committed rate.
+- * When committed and maximum rates are both zero, the connection is set
+- * to "best effort" mode, having lower priority compared to connections
+- * with committed or maximum rates.
++ * connections made to DPMAC objects, where rate is retrieved from the
++ * MAC configuration.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+-* @state: Returned link state: 1 - link is up, 0 - link is down
++* @state: Returned link state:
++* 1 - link is up;
++* 0 - link is down;
++* -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
+--- a/drivers/staging/fsl-mc/bus/mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/mc-msi.c
+@@ -65,7 +65,7 @@ static void __fsl_mc_msi_write_msg(struc
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+- irq_cfg.user_irq_id = msi_desc->irq;
++ irq_cfg.irq_num = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+--- a/drivers/staging/fsl-mc/include/dprc.h
++++ b/drivers/staging/fsl-mc/include/dprc.h
+@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
+ */
+ #define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
+
+-/* IOMMU bypass - indicates whether objects of this container are permitted
+- * to bypass the IOMMU.
+- */
+-#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
+-
+ /* AIOP - Indicates that container belongs to AIOP. */
+ #define DPRC_CFG_OPT_AIOP 0x00000020
+
+@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_i
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+- * @user_irq_id: A user defined number associated with this IRQ
++ * @irq_num: A user defined number associated with this IRQ
+ */
+ struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+- int user_irq_id;
++ int irq_num;
+ };
+
+ int dprc_set_irq(struct fsl_mc_io *mc_io,
+@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io
+ #define DPRC_OBJ_STATE_PLUGGED 0x00000002
+
+ /**
++ * Shareability flag - Object flag indicating no memory shareability.
++ * the object generates memory accesses that are non coherent with other
++ * masters;
++ * user is responsible for proper memory handling through IOMMU configuration.
++ */
++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++
++/**
+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of DPRC_OBJ_STATE_ states
+ * @label: Object label
++ * @flags: Object's flags
+ */
+ struct dprc_obj_desc {
+ char type[16];
+@@ -374,6 +378,7 @@ struct dprc_obj_desc {
+ u8 region_count;
+ u32 state;
+ char label[16];
++ u16 flags;
+ };
+
+ int dprc_get_obj(struct fsl_mc_io *mc_io,
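Note the added "status = 0" in the dprc-driver hunk: under the v5.1 binary
interface the caller's *status value is encoded into the command (see the
mc_enc(0, 32, *status) line above), so it now acts as an input and may no
longer be left uninitialized. A hedged sketch of a correct call, with
illustrative names around the patched dprc_get_irq_status():

    static int dprc_irq0_pending(struct fsl_mc_io *mc_io, u16 mc_handle,
                                 struct device *dev)
    {
        u32 status = 0;    /* required: consumed by the MC as an input */
        int error;

        error = dprc_get_irq_status(mc_io, 0, mc_handle, 0, &status);
        if (error < 0) {
            dev_err(dev, "dprc_get_irq_status() failed: %d\n", error);
            return error;
        }

        return status != 0;    /* nonzero status = events pending */
    }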
diff --git a/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch b/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch
new file mode 100644
index 0000000..2224d45
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7170-staging-fsl-mc-don-t-use-object-versions-to-make-bin.patch
@@ -0,0 +1,136 @@
+From 9382e1723e4de9832407f7e65bd4812b31e5a51d Mon Sep 17 00:00:00 2001
+From: Itai Katz <itai.katz at nxp.com>
+Date: Mon, 11 Apr 2016 11:55:40 -0500
+Subject: [PATCH 170/226] staging: fsl-mc: don't use object versions to make
+ binding decisions
+
+Up until now, if the object version expected by a driver (in the API header
+file) did not match the actual object version in the MC hardware, the bus
+driver refused to bind the object to the driver or printed out WARN_ON
+dumps.
+
+This patch removes those checks, and the responsibility of object version
+checking should now be done in the object drivers themselves. If the actual
+version discovered is not supported, the driver's probe function should fail.
+Drivers should use version checks to support new features and provide
+backwards compatibility if at all possible.
+
+This patch also removes the checks that caused bus driver probing to fail
+if the overall MC version discovered did not match the firmware version
+from the API header...this was too strict. The overall MC version is
+informational like a release number, and continues to be printed in the
+boot log.
+
+Signed-off-by: Itai Katz <itai.katz at nxp.com>
+(Stuart: reworded commit log)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +--
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 6 -----
+ drivers/staging/fsl-mc/bus/mc-bus.c | 38 +----------------------------
+ 3 files changed, 2 insertions(+), 46 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -780,9 +780,7 @@ static int dprc_remove(struct fsl_mc_dev
+ static const struct fsl_mc_device_match_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+- .obj_type = "dprc",
+- .ver_major = DPRC_VER_MAJOR,
+- .ver_minor = DPRC_VER_MINOR},
++ .obj_type = "dprc"},
+ {.vendor = 0x0},
+ };
+
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -709,20 +709,14 @@ static const struct fsl_mc_device_match_
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+- .ver_major = DPBP_VER_MAJOR,
+- .ver_minor = DPBP_VER_MINOR
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmcp",
+- .ver_major = DPMCP_VER_MAJOR,
+- .ver_minor = DPMCP_VER_MINOR
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpcon",
+- .ver_major = DPCON_VER_MAJOR,
+- .ver_minor = DPCON_VER_MINOR
+ },
+ {.vendor = 0x0},
+ };
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct devic
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+- bool major_version_mismatch = false;
+- bool minor_version_mismatch = false;
+
+ if (WARN_ON(!fsl_mc_bus_exists()))
+ goto out;
+@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct devic
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+- if (id->ver_major == mc_dev->obj_desc.ver_major) {
+- found = true;
+- if (id->ver_minor != mc_dev->obj_desc.ver_minor)
+- minor_version_mismatch = true;
+- } else {
+- major_version_mismatch = true;
+- }
++ found = true;
+
+ break;
+ }
+ }
+
+- if (major_version_mismatch) {
+- dev_warn(dev,
+- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
+- id->ver_major, id->ver_minor,
+- mc_dev->obj_desc.ver_major,
+- mc_dev->obj_desc.ver_minor);
+- } else if (minor_version_mismatch) {
+- dev_warn(dev,
+- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
+- id->ver_major, id->ver_minor,
+- mc_dev->obj_desc.ver_major,
+- mc_dev->obj_desc.ver_minor);
+- }
+-
+ out:
+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+ return found;
+@@ -722,20 +700,6 @@ static int fsl_mc_bus_probe(struct platf
+ "Freescale Management Complex Firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+- if (mc_version.major < MC_VER_MAJOR) {
+- dev_err(&pdev->dev,
+- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
+- MC_VER_MAJOR, MC_VER_MINOR);
+- error = -ENOTSUPP;
+- goto error_cleanup_mc_io;
+- }
+-
+- if (mc_version.major > MC_VER_MAJOR) {
+- dev_warn(&pdev->dev,
+- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
+- MC_VER_MAJOR, MC_VER_MINOR);
+- }
+-
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
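Since the bus now binds on vendor and type alone, a driver that cannot cope
with every object version must enforce its floor in probe. A hedged sketch of
the pattern (the dprc and dpmcp patches later in this series add exactly this
kind of check; the 2.1 floor here is illustrative):

    #include "../include/mc.h"    /* staging header path assumed */

    static int sample_probe(struct fsl_mc_device *mc_dev)
    {
        if (mc_dev->obj_desc.ver_major < 2 ||
            (mc_dev->obj_desc.ver_major == 2 &&
             mc_dev->obj_desc.ver_minor < 1)) {
            dev_err(&mc_dev->dev, "version %u.%u not supported\n",
                    mc_dev->obj_desc.ver_major,
                    mc_dev->obj_desc.ver_minor);
            return -ENOTSUPP;
        }

        /* ...normal initialization... */
        return 0;
    }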
diff --git a/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch b/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch
new file mode 100644
index 0000000..d6de805
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7171-staging-fsl-mc-set-up-coherent-dma-ops-for-added-dev.patch
@@ -0,0 +1,29 @@
+From 3657147d6fea1977c07373325626bf50fe15bcfc Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Mon, 11 Apr 2016 11:49:13 -0500
+Subject: [PATCH 171/226] staging: fsl-mc: set up coherent dma ops for added
+ devices
+
+Unless discovered devices have the no shareability flag set,
+set up coherent dma ops for them.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -469,6 +469,10 @@ int fsl_mc_device_add(struct dprc_obj_de
+ goto error_cleanup_dev;
+ }
+
++ /* Objects are coherent, unless 'no shareability' flag set. */
++ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
++ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
++
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+ */
diff --git a/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch b/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch
new file mode 100644
index 0000000..7de34d1
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7172-staging-fsl-mc-set-cacheable-flag-for-added-devices-.patch
@@ -0,0 +1,30 @@
+From f7011c18a26d40a07b837a79d0efdad795ad7250 Mon Sep 17 00:00:00 2001
+From: Itai Katz <itai.katz at nxp.com>
+Date: Mon, 11 Apr 2016 11:55:48 -0500
+Subject: [PATCH 172/226] staging: fsl-mc: set cacheable flag for added
+ devices if applicable
+
+Some DPAA2 devices have mmio regions that should be mapped as
+cacheable by drivers. Set IORESOURCE_CACHEABLE in the region's
+flags if applicable.
+
+Signed-off-by: Itai Katz <itai.katz at nxp.com>
+[Stuart: update subject and commit message]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -354,6 +354,8 @@ static int fsl_mc_device_get_mmio_region
+ regions[i].end = regions[i].start + region_desc.size - 1;
+ regions[i].name = "fsl-mc object MMIO region";
+ regions[i].flags = IORESOURCE_IO;
++ if (region_desc.flags & DPRC_REGION_CACHEABLE)
++ regions[i].flags |= IORESOURCE_CACHEABLE;
+ }
+
+ mc_dev->regions = regions;
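On the consuming side, an object driver can honor the new flag when mapping
its region. A hedged sketch, assuming region index 0 and the usual io
helpers:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *sample_map_region0(struct fsl_mc_device *mc_dev)
    {
        struct resource *res = &mc_dev->regions[0];
        resource_size_t size = resource_size(res);

        if (res->flags & IORESOURCE_CACHEABLE)
            return ioremap_cache(res->start, size);

        return ioremap(res->start, size);
    }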
diff --git a/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch b/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch
new file mode 100644
index 0000000..ade5b33
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7173-staging-fsl-mc-get-version-of-root-dprc-from-MC-hard.patch
@@ -0,0 +1,106 @@
+From 2df13a365ecda7e3321cf9d4e1a9ebd63e58c28b Mon Sep 17 00:00:00 2001
+From: Itai Katz <itai.katz at nxp.com>
+Date: Mon, 11 Apr 2016 11:55:55 -0500
+Subject: [PATCH 173/226] staging: fsl-mc: get version of root dprc from MC
+ hardware
+
+The root dprc is discovered as a platform device in the device tree. The
+version of that dprc was previously set using hardcoded values from the API
+header in the kernel. This patch removes the use of the hardcoded version
+numbers and instead reads the actual dprc version from the hardware.
+
+Signed-off-by: Itai Katz <itai.katz at nxp.com>
+(Stuart: resolved merge conflict, updated commit subject/log)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 45 ++++++++++++++++++++++++++++-------
+ 1 file changed, 37 insertions(+), 8 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -229,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct d
+ return dev == root_dprc_dev;
+ }
+
+-static int get_dprc_icid(struct fsl_mc_io *mc_io,
+- int container_id, u16 *icid)
++static int get_dprc_attr(struct fsl_mc_io *mc_io,
++ int container_id, struct dprc_attributes *attr)
+ {
+ u16 dprc_handle;
+- struct dprc_attributes attr;
+ int error;
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+@@ -242,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_i
+ return error;
+ }
+
+- memset(&attr, 0, sizeof(attr));
+- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
++ memset(attr, 0, sizeof(struct dprc_attributes));
++ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
+
+- *icid = attr.icid;
+ error = 0;
+
+ common_cleanup:
+@@ -258,6 +256,34 @@ common_cleanup:
+ return error;
+ }
+
++static int get_dprc_icid(struct fsl_mc_io *mc_io,
++ int container_id, u16 *icid)
++{
++ struct dprc_attributes attr;
++ int error;
++
++ error = get_dprc_attr(mc_io, container_id, &attr);
++ if (error == 0)
++ *icid = attr.icid;
++
++ return error;
++}
++
++static int get_dprc_version(struct fsl_mc_io *mc_io,
++ int container_id, u16 *major, u16 *minor)
++{
++ struct dprc_attributes attr;
++ int error;
++
++ error = get_dprc_attr(mc_io, container_id, &attr);
++ if (error == 0) {
++ *major = attr.version.major;
++ *minor = attr.version.minor;
++ }
++
++ return error;
++}
++
+ static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+@@ -719,11 +745,14 @@ static int fsl_mc_bus_probe(struct platf
+ goto error_cleanup_mc_io;
+ }
+
++ error = get_dprc_version(mc_io, container_id,
++ &obj_desc.ver_major, &obj_desc.ver_minor);
++ if (error < 0)
++ goto error_cleanup_mc_io;
++
+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+ strcpy(obj_desc.type, "dprc");
+ obj_desc.id = container_id;
+- obj_desc.ver_major = DPRC_VER_MAJOR;
+- obj_desc.ver_minor = DPRC_VER_MINOR;
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
diff --git a/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch b/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch
new file mode 100644
index 0000000..a5ec35c
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7174-staging-fsl-mc-add-dprc-version-check.patch
@@ -0,0 +1,90 @@
+From 653898b483e5448084b15214a8c20959b418dbe7 Mon Sep 17 00:00:00 2001
+From: Itai Katz <itai.katz at nxp.com>
+Date: Mon, 11 Apr 2016 11:56:05 -0500
+Subject: [PATCH 174/226] staging: fsl-mc: add dprc version check
+
+The dprc driver supports dprc version 5.0 and above.
+This patch adds the code to check the version.
+
+Signed-off-by: Itai Katz <itai.katz at nxp.com>
+(Stuart: resolved merge conflicts, split dpseci quirk into separate patch)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 6 +++---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 19 +++++++++++++++++++
+ drivers/staging/fsl-mc/bus/mc-bus.c | 1 +
+ drivers/staging/fsl-mc/include/mc-private.h | 2 ++
+ 4 files changed, 25 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
+@@ -40,9 +40,9 @@
+ #ifndef _FSL_DPRC_CMD_H
+ #define _FSL_DPRC_CMD_H
+
+-/* DPRC Version */
+-#define DPRC_VER_MAJOR 5
+-#define DPRC_VER_MINOR 1
++/* Minimal supported DPRC Version */
++#define DPRC_MIN_VER_MAJOR 5
++#define DPRC_MIN_VER_MINOR 0
+
+ /* Command IDs */
+ #define DPRC_CMDID_CLOSE 0x800
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -693,6 +693,25 @@ static int dprc_probe(struct fsl_mc_devi
+ goto error_cleanup_msi_domain;
+ }
+
++ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ &mc_bus->dprc_attr);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
++ error);
++ goto error_cleanup_open;
++ }
++
++ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
++ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
++ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
++ dev_err(&mc_dev->dev,
++ "ERROR: DPRC version %d.%d not supported\n",
++ mc_bus->dprc_attr.version.major,
++ mc_bus->dprc_attr.version.minor);
++ error = -ENOTSUPP;
++ goto error_cleanup_open;
++ }
++
+ mutex_init(&mc_bus->scan_mutex);
+
+ /*
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -745,6 +745,7 @@ static int fsl_mc_bus_probe(struct platf
+ goto error_cleanup_mc_io;
+ }
+
++ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+ error = get_dprc_version(mc_io, container_id,
+ &obj_desc.ver_major, &obj_desc.ver_minor);
+ if (error < 0)
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool {
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
++ * @dprc_attr: DPRC attributes
+ */
+ struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
++ struct dprc_attributes dprc_attr;
+ };
+
+ #define to_fsl_mc_bus(_mc_dev) \
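The negated compound condition in the probe hunk above is easy to misread;
written in the positive form it is the usual (major, minor) tuple ordering.
A small equivalent helper, for illustration only (DPRC_MIN_VER_* come from
dprc-cmd.h as patched above):

    #include <linux/types.h>

    static bool dprc_version_supported(u16 major, u16 minor)
    {
        return major > DPRC_MIN_VER_MAJOR ||
               (major == DPRC_MIN_VER_MAJOR &&
                minor >= DPRC_MIN_VER_MINOR);
    }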
diff --git a/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch b/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch
new file mode 100644
index 0000000..36c6f89
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7175-staging-fsl-mc-add-quirk-handling-for-dpseci-objects.patch
@@ -0,0 +1,38 @@
+From 5366dc8896ca7cf028db73643860821b189a1dfd Mon Sep 17 00:00:00 2001
+From: Horia Geanta <horia.geanta at nxp.com>
+Date: Mon, 11 Apr 2016 11:50:26 -0500
+Subject: [PATCH 175/226] staging: fsl-mc: add quirk handling for dpseci
+ objects < 4.0
+
+dpseci objects < 4.0 are not coherent-- in spite of the fact
+that the MC reports them to be coherent in certain versions.
+Add a special case to set the no shareability flag for dpseci
+objects < 4.0.
+
+Signed-off-by: Horia Geanta <horia.geanta at nxp.com>
+(Stuart: reworded commit message, updated comment in patch)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_devi
+ continue;
+ }
+
++ /*
++ * add a quirk for all versions of dpseci < 4.0...none
++ * are coherent regardless of what the MC reports.
++ */
++ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
++ (obj_desc->ver_major < 4))
++ obj_desc->flags |=
++ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
++
+ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
diff --git a/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch b/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch
new file mode 100644
index 0000000..148a724
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7176-staging-fsl-mc-add-dpmcp-version-check.patch
@@ -0,0 +1,56 @@
+From 035789ffb3b89b9764d7cc79d209a5795c18fa93 Mon Sep 17 00:00:00 2001
+From: Itai Katz <itai.katz at nxp.com>
+Date: Mon, 11 Apr 2016 11:56:11 -0500
+Subject: [PATCH 176/226] staging: fsl-mc: add dpmcp version check
+
+The dpmcp driver supports dpmcp version 3.0 and above.
+This patch adds the code to check the version.
+
+Signed-off-by: Itai Katz <itai.katz at nxp.com>
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 6 +++---
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 11 +++++++++++
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+@@ -32,9 +32,9 @@
+ #ifndef _FSL_DPMCP_CMD_H
+ #define _FSL_DPMCP_CMD_H
+
+-/* DPMCP Version */
+-#define DPMCP_VER_MAJOR 3
+-#define DPMCP_VER_MINOR 0
++/* Minimal supported DPMCP Version */
++#define DPMCP_MIN_VER_MAJOR 3
++#define DPMCP_MIN_VER_MINOR 0
+
+ /* Command IDs */
+ #define DPMCP_CMDID_CLOSE 0x800
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -297,6 +297,17 @@ int __must_check fsl_mc_portal_allocate(
+ if (WARN_ON(!dpmcp_dev))
+ goto error_cleanup_resource;
+
++ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
++ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
++ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
++ dev_err(&dpmcp_dev->dev,
++ "ERROR: Version %d.%d of DPMCP not supported.\n",
++ dpmcp_dev->obj_desc.ver_major,
++ dpmcp_dev->obj_desc.ver_minor);
++ error = -ENOTSUPP;
++ goto error_cleanup_resource;
++ }
++
+ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
+ goto error_cleanup_resource;
+
diff --git a/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch b/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch
new file mode 100644
index 0000000..3a5a3f5
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7177-staging-fsl-mc-return-EINVAL-for-all-fsl_mc_portal_a.patch
@@ -0,0 +1,30 @@
+From 324147c1a6806301d9441a8d83c7c5ac880140cd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta at nxp.com>
+Date: Mon, 11 Apr 2016 11:56:16 -0500
+Subject: [PATCH 177/226] staging: fsl-mc: return -EINVAL for all
+ fsl_mc_portal_allocate() failures
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There are some error paths that allow for a NULL new_mc_io and err = 0
+return code. Return -EINVAL instead.
+
+Signed-off-by: Horia Geantă <horia.geanta at nxp.com>
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Acked-by: German Rivera <german.rivera at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -293,6 +293,7 @@ int __must_check fsl_mc_portal_allocate(
+ if (error < 0)
+ return error;
+
++ error = -EINVAL;
+ dpmcp_dev = resource->data;
+ if (WARN_ON(!dpmcp_dev))
+ goto error_cleanup_resource;
diff --git a/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch b/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch
new file mode 100644
index 0000000..dae0a57
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7178-staging-fsl-mc-bus-Drop-warning.patch
@@ -0,0 +1,47 @@
+From 9821898bbfa5a21254baafe19b3cc97516fc6019 Mon Sep 17 00:00:00 2001
+From: Matthias Brugger <mbrugger at suse.com>
+Date: Thu, 14 Apr 2016 23:24:26 +0200
+Subject: [PATCH 178/226] staging: fsl-mc: bus: Drop warning
+
+When updating the irq_chip and msi_domain_ops, the code checks for
+already present functions.
+When more than one ITS controller is present in the system, the
+irq_chip and msi_domain_ops are already set and a warning is invoked.
+
+This patch deletes the warning, as the functions are already set to
+the needed callbacks.
+
+Signed-off-by: Matthias Brugger <mbrugger at suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-msi.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/mc-msi.c
+@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(st
+ /*
+ * set_desc should not be set by the caller
+ */
+- if (WARN_ON(ops->set_desc))
+- return;
+-
+- ops->set_desc = fsl_mc_msi_set_desc;
++ if (ops->set_desc == NULL)
++ ops->set_desc = fsl_mc_msi_set_desc;
+ }
+
+ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(s
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+- if (WARN_ON(chip->irq_write_msi_msg))
+- return;
+-
+- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
++ if (chip->irq_write_msi_msg == NULL)
++ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+ }
+
+ /**
diff --git a/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch b/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch
new file mode 100644
index 0000000..030c195
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7179-staging-fsl-mc-add-support-for-the-modalias-sysfs-at.patch
@@ -0,0 +1,54 @@
+From 227c693741ce1fbf0ad146c87f03369334941f2e Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:42 -0500
+Subject: [PATCH 179/226] staging: fsl-mc: add support for the modalias sysfs
+ attribute
+
+In order to support uevent based module loading implement modalias support
+for the fsl-mc bus driver. Aliases are based on vendor and object/device
+id and are of the form "fsl-mc:vNdN".
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -82,10 +82,35 @@ static int fsl_mc_bus_uevent(struct devi
+ return 0;
+ }
+
++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
++ mc_dev->obj_desc.type);
++}
++static DEVICE_ATTR_RO(modalias);
++
++static struct attribute *fsl_mc_dev_attrs[] = {
++ &dev_attr_modalias.attr,
++ NULL,
++};
++
++static const struct attribute_group fsl_mc_dev_group = {
++ .attrs = fsl_mc_dev_attrs,
++};
++
++static const struct attribute_group *fsl_mc_dev_groups[] = {
++ &fsl_mc_dev_group,
++ NULL,
++};
++
+ struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
++ .dev_groups = fsl_mc_dev_groups,
+ };
+ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
diff --git a/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch b/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch
new file mode 100644
index 0000000..266219b
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7180-staging-fsl-mc-implement-uevent-callback-and-set-the.patch
@@ -0,0 +1,32 @@
+From 721966c3990bc4596c6270afc1ea68c756b72f0d Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:43 -0500
+Subject: [PATCH 180/226] staging: fsl-mc: implement uevent callback and set
+ the modalias
+
+Replace placeholder code in the uevent callback to properly
+set the MODALIAS env variable.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -78,7 +78,13 @@ out:
+ */
+ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+ {
+- pr_debug("%s invoked\n", __func__);
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
++ mc_dev->obj_desc.vendor,
++ mc_dev->obj_desc.type))
++ return -ENOMEM;
++
+ return 0;
+ }
+
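With the sysfs attribute (patch 7179) and this uevent variable in place,
a dpni object from vendor 0x1957 would, for example, report
MODALIAS=fsl-mc:v00001957ddpni; udev hands that string to modprobe, which
resolves it against the module alias tables generated from the device id
tables (see patch 7182 below). The concrete vendor and type values here
are illustrative.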
diff --git a/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch b/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch
new file mode 100644
index 0000000..e5c58de
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7181-staging-fsl-mc-clean-up-the-device-id-struct.patch
@@ -0,0 +1,85 @@
+From c7b1e04ae4f47323800ca2b3d3430ecf1d9ed7df Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:44 -0500
+Subject: [PATCH 181/226] staging: fsl-mc: clean up the device id struct
+
+- rename the struct used for fsl-mc device ids to be more
+  consistent with other buses
+- remove the now obsolete and unused version fields
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +-
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 2 +-
+ drivers/staging/fsl-mc/bus/mc-bus.c | 2 +-
+ drivers/staging/fsl-mc/include/mc.h | 10 +++-------
+ 4 files changed, 6 insertions(+), 10 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -805,7 +805,7 @@ static int dprc_remove(struct fsl_mc_dev
+ return 0;
+ }
+
+-static const struct fsl_mc_device_match_id match_id_table[] = {
++static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprc"},
+--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
+@@ -717,7 +717,7 @@ static int fsl_mc_allocator_remove(struc
+ return 0;
+ }
+
+-static const struct fsl_mc_device_match_id match_id_table[] = {
++static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -36,7 +36,7 @@ static bool fsl_mc_is_root_dprc(struct d
+ */
+ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+ {
+- const struct fsl_mc_device_match_id *id;
++ const struct fsl_mc_device_id *id;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -39,7 +39,7 @@ struct fsl_mc_bus;
+ */
+ struct fsl_mc_driver {
+ struct device_driver driver;
+- const struct fsl_mc_device_match_id *match_id_table;
++ const struct fsl_mc_device_id *match_id_table;
+ int (*probe)(struct fsl_mc_device *dev);
+ int (*remove)(struct fsl_mc_device *dev);
+ void (*shutdown)(struct fsl_mc_device *dev);
+@@ -51,20 +51,16 @@ struct fsl_mc_driver {
+ container_of(_drv, struct fsl_mc_driver, driver)
+
+ /**
+- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching
++ * struct fsl_mc_device_id - MC object device Id entry for driver matching
+ * @vendor: vendor ID
+ * @obj_type: MC object type
+- * @ver_major: MC object version major number
+- * @ver_minor: MC object version minor number
+ *
+ * Type of entries in the "device Id" table for MC object devices supported by
+ * a MC object device driver. The last entry of the table has vendor set to 0x0
+ */
+-struct fsl_mc_device_match_id {
++struct fsl_mc_device_id {
+ u16 vendor;
+ const char obj_type[16];
+- u32 ver_major;
+- u32 ver_minor;
+ };
+
+ /**
diff --git a/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch b/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch
new file mode 100644
index 0000000..5ebb459
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7182-staging-fsl-mc-add-support-for-device-table-matching.patch
@@ -0,0 +1,98 @@
+From bd83c4253992d263cb83108e26b4687058f11deb Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:45 -0500
+Subject: [PATCH 182/226] staging: fsl-mc: add support for device table
+ matching
+
+Move the definition of fsl_mc_device_id to its proper location in
+mod_devicetable.h, and add fsl-mc bus support to devicetable-offsets.c
+and file2alias.c to enable device table matching. With this patch udev
+based module loading of fsl-mc drivers is supported.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/include/mc.h | 13 -------------
+ include/linux/mod_devicetable.h | 16 ++++++++++++++++
+ scripts/mod/devicetable-offsets.c | 4 ++++
+ scripts/mod/file2alias.c | 12 ++++++++++++
+ 4 files changed, 32 insertions(+), 13 deletions(-)
+
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -51,19 +51,6 @@ struct fsl_mc_driver {
+ container_of(_drv, struct fsl_mc_driver, driver)
+
+ /**
+- * struct fsl_mc_device_id - MC object device Id entry for driver matching
+- * @vendor: vendor ID
+- * @obj_type: MC object type
+- *
+- * Type of entries in the "device Id" table for MC object devices supported by
+- * a MC object device driver. The last entry of the table has vendor set to 0x0
+- */
+-struct fsl_mc_device_id {
+- u16 vendor;
+- const char obj_type[16];
+-};
+-
+-/**
+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+ *
+ * Entries in these enum are used as indices in the array of resource
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -657,4 +657,20 @@ struct ulpi_device_id {
+ kernel_ulong_t driver_data;
+ };
+
++/**
++ * struct fsl_mc_device_id - MC object device identifier
++ * @vendor: vendor ID
++ * @obj_type: MC object type
++ * @ver_major: MC object version major number
++ * @ver_minor: MC object version minor number
++ *
++ * Type of entries in the "device Id" table for MC object devices supported by
++ * a MC object device driver. The last entry of the table has vendor set to 0x0
++ */
++struct fsl_mc_device_id {
++ __u16 vendor;
++ const char obj_type[16];
++};
++
++
+ #endif /* LINUX_MOD_DEVICETABLE_H */
+--- a/scripts/mod/devicetable-offsets.c
++++ b/scripts/mod/devicetable-offsets.c
+@@ -202,5 +202,9 @@ int main(void)
+ DEVID_FIELD(hda_device_id, rev_id);
+ DEVID_FIELD(hda_device_id, api_version);
+
++ DEVID(fsl_mc_device_id);
++ DEVID_FIELD(fsl_mc_device_id, vendor);
++ DEVID_FIELD(fsl_mc_device_id, obj_type);
++
+ return 0;
+ }
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1271,6 +1271,18 @@ static int do_hda_entry(const char *file
+ }
+ ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
+
++/* Looks like: fsl-mc:vNdN */
++static int do_fsl_mc_entry(const char *filename, void *symval,
++ char *alias)
++{
++ DEF_FIELD(symval, fsl_mc_device_id, vendor);
++ DEF_FIELD_ADDR(symval, fsl_mc_device_id, obj_type);
++
++ sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
++ return 1;
++}
++ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
++
+ /* Does namelen bytes of name exactly match the symbol? */
+ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
+ {
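To illustrate the consumer side, here is a minimal, hypothetical fsl-mc
driver skeleton declaring a match table with the relocated struct
fsl_mc_device_id; the "dpexample" object type, the driver name and the
empty probe/remove bodies are assumptions for illustration, and driver
registration is omitted:

#include <linux/module.h>
#include "../include/mc.h"	/* struct fsl_mc_driver, fsl_mc_device_id */

static int example_probe(struct fsl_mc_device *mc_dev)
{
	/* claim resources for the matched MC object here */
	return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static const struct fsl_mc_device_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpexample",	/* hypothetical object type */
	},
	{ .vendor = 0x0 }	/* last entry: vendor set to 0x0 */
};
MODULE_DEVICE_TABLE(fslmc, example_match_id_table);

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example_fsl_mc_driver",
		.owner = THIS_MODULE,
	},
	.match_id_table = example_match_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

The MODULE_DEVICE_TABLE(fslmc, ...) line matches the "fslmc" table name
registered in file2alias.c above, so the build can emit
"fsl-mc:v00001957ddpexample"-style aliases for modprobe.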
diff --git a/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch b/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch
new file mode 100644
index 0000000..330a1a8
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7183-staging-fsl-mc-export-mc_get_version.patch
@@ -0,0 +1,23 @@
+From 4087dc71f82a71c25f9d051773094f4ae3f4238d Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:46 -0500
+Subject: [PATCH 183/226] staging: fsl-mc: export mc_get_version
+
+Some drivers (built as modules) rely on mc_get_version(), so export it.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dpmng.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/fsl-mc/bus/dpmng.c
++++ b/drivers/staging/fsl-mc/bus/dpmng.c
+@@ -67,6 +67,7 @@ int mc_get_version(struct fsl_mc_io *mc_
+
+ return 0;
+ }
++EXPORT_SYMBOL(mc_get_version);
+
+ /**
+ * dpmng_get_container_id() - Get container ID associated with a given portal.
diff --git a/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch b/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch
new file mode 100644
index 0000000..92b9c59
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7184-staging-fsl-mc-make-fsl_mc_is_root_dprc-global.patch
@@ -0,0 +1,77 @@
+From 82981b28f3a8a7f4ac61d8dc87a0abaeebfbe6dc Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:47 -0500
+Subject: [PATCH 184/226] staging: fsl-mc: make fsl_mc_is_root_dprc() global
+
+Make fsl_mc_is_root_dprc() global so that the dprc driver
+can use it.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 28 +++++++++++++---------------
+ drivers/staging/fsl-mc/include/mc.h | 2 ++
+ 2 files changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -24,8 +24,6 @@
+
+ static struct kmem_cache *mc_dev_cache;
+
+-static bool fsl_mc_is_root_dprc(struct device *dev);
+-
+ /**
+ * fsl_mc_bus_match - device to driver matching callback
+ * @dev: the MC object device structure to match against
+@@ -247,19 +245,6 @@ static void fsl_mc_get_root_dprc(struct
+ }
+ }
+
+-/**
+- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+- */
+-static bool fsl_mc_is_root_dprc(struct device *dev)
+-{
+- struct device *root_dprc_dev;
+-
+- fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+- if (!root_dprc_dev)
+- return false;
+- return dev == root_dprc_dev;
+-}
+-
+ static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+ {
+@@ -424,6 +409,19 @@ error_cleanup_regions:
+ }
+
+ /**
++ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
++ */
++bool fsl_mc_is_root_dprc(struct device *dev)
++{
++ struct device *root_dprc_dev;
++
++ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
++ if (!root_dprc_dev)
++ return false;
++ return dev == root_dprc_dev;
++}
++
++/**
+ * Add a newly discovered MC object device to be visible in Linux
+ */
+ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -207,6 +207,8 @@ int __must_check fsl_mc_allocate_irqs(st
+
+ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
++bool fsl_mc_is_root_dprc(struct device *dev);
++
+ extern struct bus_type fsl_mc_bus_type;
+
+ #endif /* _FSL_MC_H_ */
diff --git a/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch b/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch
new file mode 100644
index 0000000..b148c76
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7185-staging-fsl-mc-fix-asymmetry-in-destroy-of-mc_io.patch
@@ -0,0 +1,62 @@
+From 4e55a4c296d3a93c95320cdac0b8e72f3cfefb98 Mon Sep 17 00:00:00 2001
+From: Bharat Bhushan <Bharat.Bhushan at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:48 -0500
+Subject: [PATCH 185/226] staging: fsl-mc: fix asymmetry in destroy of mc_io
+
+An mc_io represents a mapped MC portal. Previously, an mc_io was
+created for the root dprc in fsl_mc_bus_probe() and for child dprcs
+in dprc_probe(). But the free of that data structure happened in the
+general bus remove callback. This asymmetry resulted in some bugs due
+to unwanted destroys of mc_io object in some scenarios (e.g. vfio).
+
+Fix this bug by making things symmetric-- mc_io created in
+fsl_mc_bus_probe() is freed in fsl_mc_bus_remove(). The mc_io created
+in dprc_probe() is freed in dprc_remove().
+
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan at nxp.com>
+[Stuart: added check for root dprc and reworded commit message]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 5 +++++
+ drivers/staging/fsl-mc/bus/mc-bus.c | 8 ++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -801,6 +801,11 @@ static int dprc_remove(struct fsl_mc_dev
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
++ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
++ fsl_destroy_mc_io(mc_dev->mc_io);
++ mc_dev->mc_io = NULL;
++ }
++
+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+ return 0;
+ }
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -579,10 +579,6 @@ void fsl_mc_device_remove(struct fsl_mc_
+
+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
+ mc_bus = to_fsl_mc_bus(mc_dev);
+- if (mc_dev->mc_io) {
+- fsl_destroy_mc_io(mc_dev->mc_io);
+- mc_dev->mc_io = NULL;
+- }
+
+ if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ if (atomic_read(&root_dprc_count) > 0)
+@@ -810,6 +806,10 @@ static int fsl_mc_bus_remove(struct plat
+ return -EINVAL;
+
+ fsl_mc_device_remove(mc->root_mc_bus_dev);
++
++ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
++ mc->root_mc_bus_dev->mc_io = NULL;
++
+ dev_info(&pdev->dev, "Root MC bus device removed");
+ return 0;
+ }
diff --git a/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch b/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch
new file mode 100644
index 0000000..0a485ec
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7186-staging-fsl-mc-dprc-add-missing-irq-free.patch
@@ -0,0 +1,28 @@
+From 159abffaa5e2acf910b5e4cdca81a7b6d2dd958f Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:49 -0500
+Subject: [PATCH 186/226] staging: fsl-mc: dprc: add missing irq free
+
+Add the missing free of the Linux IRQ when tearing down interrupts.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -760,7 +760,12 @@ error_cleanup_msi_domain:
+ */
+ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+ {
++ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
++
+ (void)disable_dprc_irq(mc_dev);
++
++ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
++
+ fsl_mc_free_irqs(mc_dev);
+ }
+
diff --git a/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch b/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch
new file mode 100644
index 0000000..c03e8d9
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7187-staging-fsl-mc-dprc-fix-ordering-problem-freeing-res.patch
@@ -0,0 +1,41 @@
+From b104ed7497745e2e6da214b37ef22edaf38098c7 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:50 -0500
+Subject: [PATCH 187/226] staging: fsl-mc: dprc: fix ordering problem freeing
+ resources in remove of dprc
+
+When unbinding a dprc from the dprc driver the cleanup of
+the resource pools must happen after irq pool cleanup
+is done.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -796,16 +796,18 @@ static int dprc_remove(struct fsl_mc_dev
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+- dprc_cleanup_all_resource_pools(mc_dev);
+- error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+- if (error < 0)
+- dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_bus);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
++ dprc_cleanup_all_resource_pools(mc_dev);
++
++ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++ if (error < 0)
++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
++
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
diff --git a/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch b/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch
new file mode 100644
index 0000000..34aadad
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7188-staging-fsl-mc-properly-set-hwirq-in-msi-set_desc.patch
@@ -0,0 +1,48 @@
+From f5f9462cb947922817225b69240740e637de0149 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 22 Jun 2016 16:40:51 -0500
+Subject: [PATCH 188/226] staging: fsl-mc: properly set hwirq in msi set_desc
+
+For an MSI domain the hwirq is an arbitrary but unique
+id to identify an interrupt. Previously the hwirq was set to
+the MSI index of the interrupt, but that only works if there is
+one DPRC. Additional DPRCs require an expanded namespace. Use
+both the ICID (which is unique per DPRC) and the MSI index to
+compose a hwirq value.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ drivers/staging/fsl-mc/bus/mc-msi.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/mc-msi.c
+@@ -20,11 +20,26 @@
+ #include "../include/mc-sys.h"
+ #include "dprc-cmd.h"
+
++/*
++ * Generate a unique ID identifying the interrupt (only used within the MSI
++ * irqdomain). Combine the ICID with the interrupt index.
++ */
++static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
++ struct msi_desc *desc)
++{
++ /*
++ * Base the hwirq value on ICID * 10000 so it is readable
++ * as a decimal value in /proc/interrupts.
++ */
++ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
++}
++
+ static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+ {
+ arg->desc = desc;
+- arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
++ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
++ desc);
+ }
+
+ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
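As a worked example of the encoding above (values illustrative): a DPRC
with ICID 26 allocating MSI index 3 gets hwirq = 3 + 26 * 10000 = 260003,
so the ICID occupies the upper decimal digits and the MSI index the lower
four. The value stays readable in /proc/interrupts and unique across
DPRCs, provided a DPRC uses fewer than 10000 MSI indexes.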
diff --git a/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch b/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch
new file mode 100644
index 0000000..5f60ea7
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7189-staging-fsl-mc-update-dpcon-binary-interface-to-v2.2.patch
@@ -0,0 +1,964 @@
+From 95c8565453e068db2664b5ee9cb0b7eced9a8d24 Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Date: Fri, 3 Jul 2015 19:02:45 +0300
+Subject: [PATCH 189/226] staging: fsl-mc: update dpcon binary interface to
+ v2.2
+
+- this includes adding the command building/parsing
+  wrapper functions
+
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+---
+ drivers/staging/fsl-mc/bus/Makefile | 3 +-
+ drivers/staging/fsl-mc/bus/dpcon.c | 407 ++++++++++++++++++++++++++++
+ drivers/staging/fsl-mc/include/dpcon-cmd.h | 102 ++++++-
+ drivers/staging/fsl-mc/include/dpcon.h | 407 ++++++++++++++++++++++++++++
+ 4 files changed, 917 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c
+ create mode 100644 drivers/staging/fsl-mc/include/dpcon.h
+
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -16,4 +16,5 @@ mc-bus-driver-objs := mc-bus.o \
+ mc-msi.o \
+ irq-gic-v3-its-fsl-mc-msi.o \
+ dpmcp.o \
+- dpbp.o
++ dpbp.o \
++ dpcon.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpcon.c
+@@ -0,0 +1,407 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../include/mc-sys.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpcon.h"
++#include "../include/dpcon-cmd.h"
++
++int dpcon_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpcon_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPCON_CMD_OPEN(cmd, dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_open);
++
++int dpcon_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_close);
++
++int dpcon_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpcon_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPCON_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpcon_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_enable);
++
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_disable);
++
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpcon_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpcon_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpcon_get_attributes);
++
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_NOTIFICATION(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL(dpcon_set_notification);
++
+--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
++++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h
+@@ -34,7 +34,7 @@
+
+ /* DPCON Version */
+ #define DPCON_VER_MAJOR 2
+-#define DPCON_VER_MINOR 1
++#define DPCON_VER_MINOR 2
+
+ /* Command IDs */
+ #define DPCON_CMDID_CLOSE 0x800
+@@ -59,4 +59,104 @@
+
+ #define DPCON_CMDID_SET_NOTIFICATION 0x100
+
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_OPEN(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
++} while (0)
++
+ #endif /* _FSL_DPCON_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpcon.h
+@@ -0,0 +1,407 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPCON_H
++#define __FSL_DPCON_H
++
++/* Data Path Concentrator API
++ * Contains initialization APIs and runtime control APIs for DPCON
++ */
++
++struct fsl_mc_io;
++
++/** General DPCON macros */
++
++/**
++ * Use it to disable notifications; see dpcon_set_notification()
++ */
++#define DPCON_INVALID_DPIO_ID (int)(-1)
++
++/**
++ * dpcon_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpcon_id: DPCON unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpcon_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpcon_id,
++ uint16_t *token);
++
++/**
++ * dpcon_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpcon_cfg - Structure representing DPCON configuration
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_cfg {
++ uint8_t num_priorities;
++};
++
++/**
++ * dpcon_create() - Create the DPCON object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPCON object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpcon_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpcon_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpcon_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_enable() - Enable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_disable() - Disable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_is_enabled() - Check if the DPCON is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpcon_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpcon_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpcon_irq_cfg *irq_cfg);
++
++/**
++ * dpcon_get_irq() - Get IRQ information from the DPCON.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpcon_irq_cfg *irq_cfg);
++
++/**
++ * dpcon_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, none of the causes
++ * will trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpcon_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpcon_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpcon_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpcon_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @status: interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpcon_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpcon_attr - Structure representing DPCON attributes
++ * @id: DPCON object ID
++ * @version: DPCON version
++ * @qbman_ch_id: Channel ID to be used by dequeue operation
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_attr {
++ int id;
++ /**
++ * struct version - DPCON version
++ * @major: DPCON major version
++ * @minor: DPCON minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint16_t qbman_ch_id;
++ uint8_t num_priorities;
++};
++
++/**
++ * dpcon_get_attributes() - Retrieve DPCON attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_attr *attr);
++
++/**
++ * struct dpcon_notification_cfg - Structure representing notification parameters
++ * @dpio_id: DPIO object ID; must be configured with a notification channel;
++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
++ * @priority: Priority selection within the DPIO channel; valid values
++ * are 0-7, depending on the number of priorities in that channel
++ * @user_ctx: User context value provided with each CDAN message
++ */
++struct dpcon_notification_cfg {
++ int dpio_id;
++ uint8_t priority;
++ uint64_t user_ctx;
++};
++
++/**
++ * dpcon_set_notification() - Set DPCON notification destination
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @cfg: Notification parameters
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_notification_cfg *cfg);
++
++#endif /* __FSL_DPCON_H */
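As a usage sketch of the wrappers added above, here is a hypothetical
setup routine; it assumes a valid MC portal (mc_io), an existing DPCON
object id and a DPIO id for notifications, and is not code from this
patch set:

static int example_dpcon_setup(struct fsl_mc_io *mc_io, int dpcon_id,
			       int dpio_id)
{
	struct dpcon_notification_cfg notif_cfg;
	uint16_t token;
	int err;

	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	/* route channel notifications to the given DPIO, priority 0 */
	notif_cfg.dpio_id = dpio_id;	/* or DPCON_INVALID_DPIO_ID to disable */
	notif_cfg.priority = 0;
	notif_cfg.user_ctx = 0;
	err = dpcon_set_notification(mc_io, 0, token, &notif_cfg);
	if (err)
		goto out_close;

	err = dpcon_enable(mc_io, 0, token);

out_close:
	dpcon_close(mc_io, 0, token);
	return err;
}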
diff --git a/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch b/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch
new file mode 100644
index 0000000..a38907b
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7190-staging-fsl-mc-root-dprc-rescan-attribute-to-sync-ke.patch
@@ -0,0 +1,59 @@
+From 75b607ff8725eac74f3375b3370f7d121d1827a3 Mon Sep 17 00:00:00 2001
+From: Lijun Pan <Lijun.Pan at freescale.com>
+Date: Mon, 8 Feb 2016 17:40:14 -0600
+Subject: [PATCH 190/226] staging: fsl-mc: root dprc rescan attribute to sync
+ kernel with MC
+
+Introduce the rescan attribute as a device attribute to
+synchronize the fsl-mc bus objects and the MC firmware.
+
+To rescan the root dprc only, e.g.
+echo 1 > /sys/bus/fsl-mc/devices/dprc.1/rescan
+
+Signed-off-by: Lijun Pan <Lijun.Pan at freescale.com>
+[Stuart: resolved merge conflict]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -96,8 +96,37 @@ static ssize_t modalias_show(struct devi
+ }
+ static DEVICE_ATTR_RO(modalias);
+
++static ssize_t rescan_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return -EINVAL;
++
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val) {
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return count;
++}
++static DEVICE_ATTR_WO(rescan);
++
+ static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
++ &dev_attr_rescan.attr,
+ NULL,
+ };
+
diff --git a/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch b/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch
new file mode 100644
index 0000000..d89815d
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7191-staging-fsl-mc-bus-rescan-attribute-to-sync-kernel-w.patch
@@ -0,0 +1,78 @@
+From 417d71b1e291725c01893bf1553478924d05952f Mon Sep 17 00:00:00 2001
+From: Lijun Pan <Lijun.Pan at freescale.com>
+Date: Mon, 8 Feb 2016 17:40:16 -0600
+Subject: [PATCH 191/226] staging: fsl-mc: bus rescan attribute to sync kernel
+ with MC
+
+Introduce the rescan attribute as a bus attribute to
+synchronize the fsl-mc bus objects and the MC firmware.
+
+To rescan the fsl-mc bus, e.g.,
+echo 1 > /sys/bus/fsl-mc/rescan
+
+Signed-off-by: Lijun Pan <Lijun.Pan at freescale.com>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 47 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 47 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -139,11 +139,58 @@ static const struct attribute_group *fsl
+ NULL,
+ };
+
++static int scan_fsl_mc_bus(struct device *dev, void *data)
++{
++ unsigned int irq_count;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++
++ if (fsl_mc_is_root_dprc(dev)) {
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, &irq_count);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return 0;
++}
++
++static ssize_t bus_rescan_store(struct bus_type *bus,
++ const char *buf, size_t count)
++{
++ unsigned long val;
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val)
++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
++
++ return count;
++}
++static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store);
++
++static struct attribute *fsl_mc_bus_attrs[] = {
++ &bus_attr_rescan.attr,
++ NULL,
++};
++
++static const struct attribute_group fsl_mc_bus_group = {
++ .attrs = fsl_mc_bus_attrs,
++};
++
++static const struct attribute_group *fsl_mc_bus_groups[] = {
++ &fsl_mc_bus_group,
++ NULL,
++};
++
+ struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
++ .bus_groups = fsl_mc_bus_groups,
+ };
+ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
diff --git a/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch b/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch
new file mode 100644
index 0000000..20b952e
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7192-staging-fsl-mc-Propagate-driver_override-for-a-child.patch
@@ -0,0 +1,193 @@
+From 2b9110586a96afc0d0e246835da176c48ae7c973 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Fri, 13 Mar 2015 15:03:32 -0500
+Subject: [PATCH 192/226] staging: fsl-mc: Propagate driver_override for a
+ child DPRC's children
+
+When a child DPRC is bound to the vfio_fsl_mc driver via driver_override,
+its own children should not be bound to corresponding host kernel
+drivers, but instead should be bound to the vfio_fsl_mc driver as
+well.
+
+Currently, when a child container is scanned by the vfio_fsl_mc driver,
+child devices found are automatically bound to corresponding host kernel
+drivers (e.g., DPMCP and DPBP objects are bound to the fsl_mc_allocator
+driver, DPNI objects are bound to the ldpaa_eth driver, etc), Then,
+the user has to manually unbind these child devices from their drivers,
+set the driver_override sysfs attribute to vfio_fsl_mc driver, for each
+of them and rebind them.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 14 ++++++++++----
+ drivers/staging/fsl-mc/bus/mc-bus.c | 20 +++++++++++++++++---
+ drivers/staging/fsl-mc/include/mc-private.h | 2 ++
+ drivers/staging/fsl-mc/include/mc.h | 2 ++
+ 4 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -152,6 +152,8 @@ static void check_plugged_state_change(s
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the DPRC,
++ * or NULL, if none.
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+@@ -161,6 +163,7 @@ static void check_plugged_state_change(s
+ * in the physical DPRC.
+ */
+ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ struct dprc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+ {
+@@ -184,7 +187,7 @@ static void dprc_add_new_devices(struct
+ }
+
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+- &child_dev);
++ driver_override, &child_dev);
+ if (error < 0)
+ continue;
+ }
+@@ -243,6 +246,8 @@ static void dprc_cleanup_all_resource_po
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the DPRC,
++ * or NULL, if none.
+ * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+@@ -258,6 +263,7 @@ static void dprc_cleanup_all_resource_po
+ * of the device drivers for the non-allocatable devices.
+ */
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count)
+ {
+ int num_child_objects;
+@@ -338,7 +344,7 @@ int dprc_scan_objects(struct fsl_mc_devi
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+@@ -369,7 +375,7 @@ int dprc_scan_container(struct fsl_mc_de
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+- error = dprc_scan_objects(mc_bus_dev, &irq_count);
++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
+ mutex_unlock(&mc_bus->scan_mutex);
+ if (error < 0)
+ goto error;
+@@ -456,7 +462,7 @@ static irqreturn_t dprc_irq0_handler_thr
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+ unsigned int irq_count;
+
+- error = dprc_scan_objects(mc_dev, &irq_count);
++ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -116,7 +116,7 @@ static ssize_t rescan_store(struct devic
+
+ if (val) {
+ mutex_lock(&root_mc_bus->scan_mutex);
+- dprc_scan_objects(root_mc_dev, &irq_count);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+ }
+
+@@ -149,7 +149,7 @@ static int scan_fsl_mc_bus(struct device
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+- dprc_scan_objects(root_mc_dev, &irq_count);
++ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+ }
+
+@@ -503,6 +503,7 @@ bool fsl_mc_is_root_dprc(struct device *
+ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev)
+ {
+ int error;
+@@ -535,6 +536,18 @@ int fsl_mc_device_add(struct dprc_obj_de
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
++ if (driver_override) {
++ /*
++ * We trust driver_override, so we don't need to use
++ * kstrndup() here
++ */
++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
++ if (!mc_dev->driver_override) {
++ error = -ENOMEM;
++ goto error_cleanup_dev;
++ }
++ }
++
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
+@@ -858,7 +871,8 @@ static int fsl_mc_bus_probe(struct platf
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
++ &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -110,6 +110,7 @@ struct fsl_mc_bus {
+ int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
++ const char *driver_override,
+ struct fsl_mc_device **new_mc_dev);
+
+ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+@@ -117,6 +118,7 @@ void fsl_mc_device_remove(struct fsl_mc_
+ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
+
+ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
+ unsigned int *total_irq_count);
+
+ int __init dprc_driver_init(void);
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -129,6 +129,7 @@ struct fsl_mc_device_irq {
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+@@ -161,6 +162,7 @@ struct fsl_mc_device {
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
++ const char *driver_override;
+ };
+
+ #define to_fsl_mc_device(_dev) \
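
To make the new parameter concrete, here is a minimal sketch (not part of the patch) of how a caller such as the vfio_fsl_mc driver could scan a child container so that every object found is created with driver_override preset. The wrapper name is hypothetical; dprc_scan_objects() and the scan_mutex usage match the code above:

    static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
    {
        struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
        unsigned int irq_count;
        int error;

        mutex_lock(&mc_bus->scan_mutex);
        /* each child object is added with driver_override = "vfio-fsl-mc" */
        error = dprc_scan_objects(mc_dev, "vfio-fsl-mc", &irq_count);
        mutex_unlock(&mc_bus->scan_mutex);

        return error;
    }
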
diff --git a/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch b/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch
new file mode 100644
index 0000000..60d294a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7193-staging-fsl-mc-add-device-binding-path-driver_overri.patch
@@ -0,0 +1,111 @@
+From 0bda83c15b2ecfc45fac0656df15d4f4fa65afa9 Mon Sep 17 00:00:00 2001
+From: Bharat Bhushan <bharat.bhushan at freescale.com>
+Date: Wed, 18 Mar 2015 17:32:59 -0500
+Subject: [PATCH 193/226] staging: fsl-mc: add device binding path
+ 'driver_override'
+
+This patch is required for the vfio-fsl-mc meta driver to successfully bind
+layerscape container devices for device passthrough. It adds a mechanism
+that allows a layerscape device to specify a driver, rather than relying on
+a layerscape driver to provide the device match.
+
+This patch is based on the following proposed patches for PCI and platform devices:
+- https://lkml.org/lkml/2014/4/8/571 :- For platform devices
+- http://lists-archives.com/linux-kernel/28030441-pci-introduce-new-device-binding-path-using-pci_dev-driver_override.html :- For PCI devices
+
+Example to allow a device (dprc.1) to specifically bind
+with driver (vfio-fsl-mc):-
+- echo vfio-fsl-mc > /sys/bus/fsl-mc/devices/dprc.1/driver_override
+- echo dprc.1 > /sys/bus/fsl-mc/drivers/fsl_mc_dprc/unbind
+- echo dprc.1 > /sys/bus/fsl-mc/drivers/vfio-fsl-mc/bind
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+(Stuart: resolved merge conflicts)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 53 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 53 insertions(+)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -42,6 +42,12 @@ static int fsl_mc_bus_match(struct devic
+ if (WARN_ON(!fsl_mc_bus_exists()))
+ goto out;
+
++ /* When driver_override is set, only bind to the matching driver */
++ if (mc_dev->driver_override) {
++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
++ goto out;
++ }
++
+ if (!mc_drv->match_id_table)
+ goto out;
+
+@@ -96,6 +102,50 @@ static ssize_t modalias_show(struct devi
+ }
+ static DEVICE_ATTR_RO(modalias);
+
++static ssize_t driver_override_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ const char *driver_override, *old = mc_dev->driver_override;
++ char *cp;
++
++ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
++
++ if (count > PATH_MAX)
++ return -EINVAL;
++
++ driver_override = kstrndup(buf, count, GFP_KERNEL);
++ if (!driver_override)
++ return -ENOMEM;
++
++ cp = strchr(driver_override, '\n');
++ if (cp)
++ *cp = '\0';
++
++ if (strlen(driver_override)) {
++ mc_dev->driver_override = driver_override;
++ } else {
++ kfree(driver_override);
++ mc_dev->driver_override = NULL;
++ }
++
++ kfree(old);
++
++ return count;
++}
++
++static ssize_t driver_override_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return sprintf(buf, "%s\n", mc_dev->driver_override);
++}
++
++static DEVICE_ATTR_RW(driver_override);
++
+ static ssize_t rescan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+@@ -127,6 +177,7 @@ static DEVICE_ATTR_WO(rescan);
+ static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_rescan.attr,
++ &dev_attr_driver_override.attr,
+ NULL,
+ };
+
+@@ -677,6 +728,8 @@ void fsl_mc_device_remove(struct fsl_mc_
+ }
+ }
+
++ kfree(mc_dev->driver_override);
++ mc_dev->driver_override = NULL;
+ if (mc_bus)
+ devm_kfree(mc_dev->dev.parent, mc_bus);
+ else
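
For completeness, a hedged user-space sketch of driving the new sysfs attribute from C rather than the shell; it performs the same first step as the echo example in the commit message (the device name dprc.1 is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int set_driver_override(const char *dev, const char *drv)
    {
        char path[128];
        ssize_t n;
        int fd;

        snprintf(path, sizeof(path),
                 "/sys/bus/fsl-mc/devices/%s/driver_override", dev);
        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        n = write(fd, drv, strlen(drv));
        close(fd);
        return n < 0 ? -1 : 0;
    }

    /* e.g. set_driver_override("dprc.1", "vfio-fsl-mc\n"); */
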
diff --git a/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch b/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch
new file mode 100644
index 0000000..0b92685
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7194-staging-fsl-mc-export-irq-cleanup-for-vfio-to-use.patch
@@ -0,0 +1,47 @@
+From 552d628c887d970b9a97d8db2629adc4820fb8e3 Mon Sep 17 00:00:00 2001
+From: Bharat Bhushan <Bharat.Bhushan at freescale.com>
+Date: Thu, 16 Jul 2015 14:44:24 +0530
+Subject: [PATCH 194/226] staging: fsl-mc: export irq cleanup for vfio to use
+
+The VFIO driver needs these basic functions for
+setting up the ITT/ITS of DPRCs bound to it.
+
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan at freescale.com>
+(Stuart: resolved merge conflict, commit log cleanup)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 4 ++--
+ drivers/staging/fsl-mc/include/mc-private.h | 4 ++++
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
+@@ -193,7 +193,7 @@ static void dprc_add_new_devices(struct
+ }
+ }
+
+-static void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+ {
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+@@ -234,7 +234,7 @@ static void dprc_cleanup_resource_pool(s
+ WARN_ON(free_count != res_pool->free_count);
+ }
+
+-static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+ {
+ int pool_type;
+
+--- a/drivers/staging/fsl-mc/include/mc-private.h
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -157,4 +157,8 @@ int fsl_mc_populate_irq_pool(struct fsl_
+
+ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
++
++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
++
+ #endif /* _FSL_MC_PRIVATE_H_ */
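
As a rough sketch of the intended consumer (a VFIO-style driver tearing down and rebuilding a DPRC's pools around passthrough; the wrapper name is hypothetical, the two helpers are the ones exported above):

    static void vfio_reset_dprc_pools(struct fsl_mc_device *mc_bus_dev)
    {
        /* drop all pooled resources, then rebuild empty pools */
        dprc_cleanup_all_resource_pools(mc_bus_dev);
        dprc_init_all_resource_pools(mc_bus_dev);
    }
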
diff --git a/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch b/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch
new file mode 100644
index 0000000..438234b
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7195-increment-MC_CMD_COMPLETION_TIMEOUT_MS.patch
@@ -0,0 +1,88 @@
+From 71d19cd1107fa435d056e08e7d7ef7d8f714cf35 Mon Sep 17 00:00:00 2001
+From: Lijun Pan <Lijun.Pan at freescale.com>
+Date: Fri, 31 Jul 2015 15:07:32 -0500
+Subject: [PATCH 195/226] increment MC_CMD_COMPLETION_TIMEOUT_MS
+
+5000 ms is barely enough for dpsw/dpdmux creation.
+If the MC firmware gets faster, we can decrease the value later on.
+
+Signed-off-by: Lijun Pan <Lijun.Pan at freescale.com>
+(Stuart: resolved merge conflict)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/mc-sys.c | 38 +++++++++++++++--------------------
+ 1 file changed, 16 insertions(+), 22 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ b/drivers/staging/fsl-mc/bus/mc-sys.c
+@@ -43,8 +43,10 @@
+
+ /**
+ * Timeout in milliseconds to wait for the completion of an MC command
++ * 5000 ms is barely enough for dpsw/dpdmux creation
++ * TODO: if the MC firmware responds faster, we should decrease this value
+ */
+-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
++#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
+
+ /*
+ * usleep_range() min and max values used to throttle down polling
+@@ -327,17 +329,8 @@ static int mc_polling_wait_preemptible(s
+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+- if (time_after_eq(jiffies, jiffies_until_timeout)) {
+- dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+- mc_io->portal_phys_addr,
+- (unsigned int)
+- MC_CMD_HDR_READ_TOKEN(cmd->header),
+- (unsigned int)
+- MC_CMD_HDR_READ_CMDID(cmd->header));
+-
++ if (time_after_eq(jiffies, jiffies_until_timeout))
+ return -ETIMEDOUT;
+- }
+ }
+
+ *mc_status = status;
+@@ -369,17 +362,8 @@ static int mc_polling_wait_atomic(struct
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+- if (timeout_usecs == 0) {
+- dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+- mc_io->portal_phys_addr,
+- (unsigned int)
+- MC_CMD_HDR_READ_TOKEN(cmd->header),
+- (unsigned int)
+- MC_CMD_HDR_READ_CMDID(cmd->header));
+-
++ if (timeout_usecs == 0)
+ return -ETIMEDOUT;
+- }
+ }
+
+ *mc_status = status;
+@@ -422,9 +406,19 @@ int mc_send_command(struct fsl_mc_io *mc
+ else
+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
+
+- if (error < 0)
++ if (error < 0) {
++ if (error == -ETIMEDOUT) {
++ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
++ mc_io->portal_phys_addr,
++ (unsigned int)
++ MC_CMD_HDR_READ_TOKEN(cmd->header),
++ (unsigned int)
++ MC_CMD_HDR_READ_CMDID(cmd->header));
++ }
+ goto common_exit;
+
++ }
++
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
diff --git a/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch b/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch
new file mode 100644
index 0000000..d465f90
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7196-staging-fsl-mc-make-fsl_mc_get_root_dprc-public.patch
@@ -0,0 +1,45 @@
+From 12b1317fb3ab5b56efd833fa3b22965adf1d2c96 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Fri, 15 Apr 2016 17:07:16 -0500
+Subject: [PATCH 196/226] staging: fsl-mc: make fsl_mc_get_root_dprc public
+
+This is needed by other components (e.g., vfio) to find
+the root dprc.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/mc-bus.c | 3 ++-
+ drivers/staging/fsl-mc/include/mc.h | 3 +++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-mc/bus/mc-bus.c
++++ b/drivers/staging/fsl-mc/bus/mc-bus.c
+@@ -358,7 +358,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
+ /**
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+-static void fsl_mc_get_root_dprc(struct device *dev,
++void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
+ {
+ if (WARN_ON(!dev)) {
+@@ -371,6 +371,7 @@ static void fsl_mc_get_root_dprc(struct
+ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
+ }
++EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
+
+ static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ b/drivers/staging/fsl-mc/include/mc.h
+@@ -191,6 +191,9 @@ void fsl_mc_driver_unregister(struct fsl
+
+ bool fsl_mc_bus_exists(void);
+
++void fsl_mc_get_root_dprc(struct device *dev,
++ struct device **root_dprc_dev);
++
+ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io);
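
A minimal sketch of how an external component could use the newly exported helper to reach the root dprc from an arbitrary fsl-mc device (the wrapper is hypothetical; fsl_mc_get_root_dprc() and to_fsl_mc_device() are taken from the code above):

    static struct fsl_mc_device *find_root_dprc(struct device *dev)
    {
        struct device *root_dprc_dev = NULL;

        fsl_mc_get_root_dprc(dev, &root_dprc_dev);
        if (!root_dprc_dev)
            return NULL;

        return to_fsl_mc_device(root_dprc_dev);
    }
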
diff --git a/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch b/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch
new file mode 100644
index 0000000..2a5e5df
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7197-staging-fsl-mc-Management-Complex-restool-driver.patch
@@ -0,0 +1,489 @@
+From fb4881d149742e4c5595aca8bf86c99d2ea155ad Mon Sep 17 00:00:00 2001
+From: Lijun Pan <Lijun.Pan at freescale.com>
+Date: Mon, 8 Feb 2016 17:40:18 -0600
+Subject: [PATCH 197/226] staging: fsl-mc: Management Complex restool driver
+
+The kernel support for the restool (a user space resource management
+tool) is a driver for the /dev/dprc.N device file.
+Its purpose is to provide an ioctl interface,
+which the restool uses to interact with the MC bus driver
+and with the MC firmware.
+We allocate a dpmcp at driver initialization
+and keep that dpmcp until driver exit.
+We use that dpmcp by default.
+If that dpmcp is in use, we create another portal at run time
+and destroy the newly created portal after use.
+The ioctl RESTOOL_SEND_MC_COMMAND sends a user space command to the
+fsl-mc bus and uses the fsl-mc bus to communicate with the MC firmware.
+The ioctl RESTOOL_DPRC_SYNC requests that the mc-bus launch
+an object scan under the root dprc.
+In order to support multiple root dprcs, we utilize the bus notify
+mechanism to scan fsl_mc_bus_type for newly added root dprcs.
+After discovering a root dprc, the driver creates a miscdevice
+/dev/dprc.N associated with it.
+
+Signed-off-by: Lijun Pan <Lijun.Pan at freescale.com>
+[Stuart: minor fix to resolve compile error]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ Documentation/ioctl/ioctl-number.txt | 1 +
+ drivers/staging/fsl-mc/bus/Kconfig | 7 +-
+ drivers/staging/fsl-mc/bus/Makefile | 3 +
+ drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 ++
+ drivers/staging/fsl-mc/bus/mc-restool.c | 392 +++++++++++++++++++++++++++++++
+ 5 files changed, 424 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c
+
+--- a/Documentation/ioctl/ioctl-number.txt
++++ b/Documentation/ioctl/ioctl-number.txt
+@@ -170,6 +170,7 @@ Code Seq#(hex) Include File Comments
+ 'R' 00-1F linux/random.h conflict!
+ 'R' 01 linux/rfkill.h conflict!
+ 'R' C0-DF net/bluetooth/rfcomm.h
++'R' E0-EF drivers/staging/fsl-mc/bus/mc-ioctl.h
+ 'S' all linux/cdrom.h conflict!
+ 'S' 80-81 scsi/scsi_ioctl.h conflict!
+ 'S' 82-FF scsi/scsi.h conflict!
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -22,4 +22,9 @@ config FSL_MC_BUS
+ Only enable this option when building the kernel for
+ Freescale QorIQ LS2xxxx SoCs.
+
+-
++config FSL_MC_RESTOOL
++ tristate "Freescale Management Complex (MC) restool driver"
++ depends on FSL_MC_BUS
++ help
++ Driver that provides kernel support for the Freescale Management
++ Complex resource manager user-space tool.
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -18,3 +18,6 @@ mc-bus-driver-objs := mc-bus.o \
+ dpmcp.o \
+ dpbp.o \
+ dpcon.o
++
++# MC restool kernel support
++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
+@@ -0,0 +1,22 @@
++/*
++ * Freescale Management Complex (MC) ioctl interface
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan at freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++#ifndef _FSL_MC_IOCTL_H_
++#define _FSL_MC_IOCTL_H_
++
++#include <linux/ioctl.h>
++#include "../include/mc-sys.h"
++
++#define RESTOOL_IOCTL_TYPE 'R'
++
++#define RESTOOL_SEND_MC_COMMAND \
++ _IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
++
++#endif /* _FSL_MC_IOCTL_H_ */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/mc-restool.c
+@@ -0,0 +1,392 @@
++/*
++ * Freescale Management Complex (MC) restool driver
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: Lijun Pan <Lijun.Pan at freescale.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "../include/mc-private.h"
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include "mc-ioctl.h"
++#include "../include/mc-sys.h"
++#include "../include/mc-cmd.h"
++#include "../include/dpmng.h"
++
++/**
++ * Maximum number of DPRCs that can be opened at the same time
++ */
++#define MAX_DPRC_HANDLES 64
++
++/**
++ * restool_misc - information associated with the newly added miscdevice
++ * @misc: newly created miscdevice associated with root dprc
++ * @miscdevt: device id of this miscdevice
++ * @list: a linked list node representing this miscdevice
++ * @static_mc_io: pointer to the static MC I/O object used by the restool
++ * @dynamic_instance_count: number of dynamically created instances
++ * @static_instance_in_use: static instance is in use or not
++ * @mutex: mutex lock to serialize the open/release operations
++ * @dev: root dprc associated with this miscdevice
++ */
++struct restool_misc {
++ struct miscdevice misc;
++ dev_t miscdevt;
++ struct list_head list;
++ struct fsl_mc_io *static_mc_io;
++ u32 dynamic_instance_count;
++ bool static_instance_in_use;
++ struct mutex mutex; /* serialize the open/release operations */
++ struct device *dev;
++};
++
++/*
++ * initialize a global list to link all
++ * the miscdevice nodes (struct restool_misc)
++ */
++static LIST_HEAD(misc_list);
++static DEFINE_MUTEX(misc_list_mutex);
++
++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_device *root_mc_dev;
++ int error;
++ struct fsl_mc_io *dynamic_mc_io = NULL;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ if (WARN_ON(!restool_misc->dev))
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (!restool_misc->static_instance_in_use) {
++ restool_misc->static_instance_in_use = true;
++ filep->private_data = restool_misc->static_mc_io;
++ } else {
++ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
++ if (!dynamic_mc_io) {
++ error = -ENOMEM;
++ goto err_unlock;
++ }
++
++ root_mc_dev = to_fsl_mc_device(restool_misc->dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_dynamic_mc_io;
++ }
++ ++restool_misc->dynamic_instance_count;
++ filep->private_data = dynamic_mc_io;
++ }
++
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++
++free_dynamic_mc_io:
++ kfree(dynamic_mc_io);
++err_unlock:
++ mutex_unlock(&restool_misc->mutex);
++
++ return error;
++}
++
++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
++{
++ struct fsl_mc_io *local_mc_io = filep->private_data;
++ struct restool_misc *restool_misc = NULL;
++ struct restool_misc *restool_misc_cursor;
++
++ if (WARN_ON(!filep->private_data))
++ return -EINVAL;
++
++ mutex_lock(&misc_list_mutex);
++
++ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
++ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
++ restool_misc = restool_misc_cursor;
++ break;
++ }
++ }
++
++ mutex_unlock(&misc_list_mutex);
++
++ if (!restool_misc)
++ return -EINVAL;
++
++ mutex_lock(&restool_misc->mutex);
++
++ if (WARN_ON(restool_misc->dynamic_instance_count == 0 &&
++ !restool_misc->static_instance_in_use)) {
++ mutex_unlock(&restool_misc->mutex);
++ return -EINVAL;
++ }
++
++ /* Globally clean up opened/untracked handles */
++ fsl_mc_portal_reset(local_mc_io);
++
++ /*
++ * We must check
++ * whether local_mc_io is a dynamic or a static instance;
++ * otherwise we could free the reserved portal by accident,
++ * or fail to free a dynamically allocated portal
++ * when two or more instances run concurrently.
++ */
++ if (local_mc_io == restool_misc->static_mc_io) {
++ restool_misc->static_instance_in_use = false;
++ } else {
++ fsl_mc_portal_free(local_mc_io);
++ kfree(filep->private_data);
++ --restool_misc->dynamic_instance_count;
++ }
++
++ filep->private_data = NULL;
++ mutex_unlock(&restool_misc->mutex);
++
++ return 0;
++}
++
++static int restool_send_mc_command(unsigned long arg,
++ struct fsl_mc_io *local_mc_io)
++{
++ int error;
++ struct mc_command mc_cmd;
++
++ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ /*
++ * Send MC command to the MC:
++ */
++ error = mc_send_command(local_mc_io, &mc_cmd);
++ if (error < 0)
++ return error;
++
++ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static long
++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int error;
++
++ switch (cmd) {
++ case RESTOOL_SEND_MC_COMMAND:
++ error = restool_send_mc_command(arg, file->private_data);
++ break;
++ default:
++ pr_err("%s: unexpected ioctl call number\n", __func__);
++ error = -EINVAL;
++ }
++
++ return error;
++}
++
++static const struct file_operations fsl_mc_restool_dev_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_mc_restool_dev_open,
++ .release = fsl_mc_restool_dev_release,
++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
++};
++
++static int restool_add_device_file(struct device *dev)
++{
++ u32 name1 = 0;
++ char name2[20] = {0};
++ int error;
++ struct fsl_mc_device *root_mc_dev;
++ struct restool_misc *restool_misc;
++
++ if (dev->bus == &platform_bus_type && dev->driver_data) {
++ if (sscanf(dev_name(dev), "%x.%s", &name1, name2) != 2)
++ return -EINVAL;
++
++ if (strcmp(name2, "fsl-mc") == 0)
++ pr_debug("platform's root dprc name is: %s\n",
++ dev_name(&(((struct fsl_mc *)
++ (dev->driver_data))->root_mc_bus_dev->dev)));
++ }
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return 0;
++
++ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL);
++ if (!restool_misc)
++ return -ENOMEM;
++
++ restool_misc->dev = dev;
++ root_mc_dev = to_fsl_mc_device(dev);
++ error = fsl_mc_portal_allocate(root_mc_dev, 0,
++ &restool_misc->static_mc_io);
++ if (error < 0) {
++ pr_err("Not able to allocate MC portal\n");
++ goto free_restool_misc;
++ }
++
++ restool_misc->misc.minor = MISC_DYNAMIC_MINOR;
++ restool_misc->misc.name = dev_name(dev);
++ restool_misc->misc.fops = &fsl_mc_restool_dev_fops;
++
++ error = misc_register(&restool_misc->misc);
++ if (error < 0) {
++ pr_err("misc_register() failed: %d\n", error);
++ goto free_portal;
++ }
++
++ restool_misc->miscdevt = restool_misc->misc.this_device->devt;
++ mutex_init(&restool_misc->mutex);
++ mutex_lock(&misc_list_mutex);
++ list_add(&restool_misc->list, &misc_list);
++ mutex_unlock(&misc_list_mutex);
++
++ pr_info("/dev/%s driver registered\n", dev_name(dev));
++
++ return 0;
++
++free_portal:
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++free_restool_misc:
++ kfree(restool_misc);
++
++ return error;
++}
++
++static int restool_bus_notifier(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ int error;
++ struct device *dev = data;
++
++ switch (action) {
++ case BUS_NOTIFY_ADD_DEVICE:
++ error = restool_add_device_file(dev);
++ if (error)
++ return error;
++ break;
++ case BUS_NOTIFY_DEL_DEVICE:
++ case BUS_NOTIFY_REMOVED_DEVICE:
++ case BUS_NOTIFY_BIND_DRIVER:
++ case BUS_NOTIFY_BOUND_DRIVER:
++ case BUS_NOTIFY_UNBIND_DRIVER:
++ case BUS_NOTIFY_UNBOUND_DRIVER:
++ break;
++ default:
++ pr_err("%s: unrecognized device action from %s\n", __func__,
++ dev_name(dev));
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int add_to_restool(struct device *dev, void *data)
++{
++ return restool_add_device_file(dev);
++}
++
++static int __init fsl_mc_restool_driver_init(void)
++{
++ int error;
++ struct notifier_block *nb;
++
++ nb = kzalloc(sizeof(*nb), GFP_KERNEL);
++ if (!nb)
++ return -ENOMEM;
++
++ nb->notifier_call = restool_bus_notifier;
++ error = bus_register_notifier(&fsl_mc_bus_type, nb);
++ if (error)
++ goto free_nb;
++
++ /*
++ * This driver runs after the fsl-mc bus driver.
++ * Hence, many of the root dprcs are already attached to the
++ * fsl-mc bus. In order to make sure we find all the root dprcs,
++ * we need to scan the fsl_mc_bus_type.
++ */
++ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool);
++ if (error) {
++ bus_unregister_notifier(&fsl_mc_bus_type, nb);
++ kfree(nb);
++ pr_err("restool driver registration failure\n");
++ return error;
++ }
++
++ return 0;
++
++free_nb:
++ kfree(nb);
++ return error;
++}
++
++module_init(fsl_mc_restool_driver_init);
++
++static void __exit fsl_mc_restool_driver_exit(void)
++{
++ struct restool_misc *restool_misc;
++ struct restool_misc *restool_misc_tmp;
++ char name1[20] = {0};
++ u32 name2 = 0;
++
++ list_for_each_entry_safe(restool_misc, restool_misc_tmp,
++ &misc_list, list) {
++ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2)
++ != 2)
++ continue;
++
++ pr_debug("name1=%s,name2=%u\n", name1, name2);
++ pr_debug("misc-device: %s\n", restool_misc->misc.name);
++ if (strcmp(name1, "dprc") != 0)
++ continue;
++
++ if (WARN_ON(!restool_misc->static_mc_io))
++ return;
++
++ if (WARN_ON(restool_misc->dynamic_instance_count != 0))
++ return;
++
++ if (WARN_ON(restool_misc->static_instance_in_use))
++ return;
++
++ misc_deregister(&restool_misc->misc);
++ pr_info("/dev/%s driver unregistered\n",
++ restool_misc->misc.name);
++ fsl_mc_portal_free(restool_misc->static_mc_io);
++ list_del(&restool_misc->list);
++ kfree(restool_misc);
++ }
++}
++
++module_exit(fsl_mc_restool_driver_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor Inc.");
++MODULE_DESCRIPTION("Freescale's MC restool driver");
++MODULE_LICENSE("GPL");
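
A hedged user-space sketch of the ioctl interface this driver exposes. The struct layout here is assumed to mirror the kernel's struct mc_command (a 64-bit header plus seven 64-bit parameter words) as used by mc_send_command() above; the device name dprc.1 is illustrative, and encoding a real MC command header is firmware-specific and elided:

    #include <stdint.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct mc_command {
        uint64_t header;
        uint64_t params[7];
    };

    #define RESTOOL_SEND_MC_COMMAND _IOWR('R', 0xE0, struct mc_command)

    static int send_mc_command(struct mc_command *cmd)
    {
        int fd, ret;

        fd = open("/dev/dprc.1", O_RDWR);
        if (fd < 0)
            return -1;

        /* the kernel forwards cmd to the MC and copies the response back */
        ret = ioctl(fd, RESTOOL_SEND_MC_COMMAND, cmd);
        close(fd);
        return ret;
    }
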
diff --git a/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch b/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch
new file mode 100644
index 0000000..7613d0a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7198-staging-fsl-mc-dpio-services-driver.patch
@@ -0,0 +1,8943 @@
+From 331b26080961f0289c3a8a8e5e65f6524b23be19 Mon Sep 17 00:00:00 2001
+From: Jeffrey Ladouceur <Jeffrey.Ladouceur at freescale.com>
+Date: Tue, 7 Apr 2015 23:24:55 -0400
+Subject: [PATCH 198/226] staging: fsl-mc: dpio services driver
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is a squashed commit of the cumulative dpio services patches
+in the SDK 2.0 kernel as of 3/7/2016.
+
+staging: fsl-mc: dpio: initial implementation of dpio services
+
+* Port from kernel 3.16 to 3.19
+* upgrade to match MC fw 7.0.0
+* return -EPROBE_DEFER if fsl_mc_portal_allocate() fails.
+* enable DPIO interrupt support
+* implement service FQDAN handling
+* DPIO service selects DPIO objects using crude algorithms for now; we
+ will look to make this smarter later on.
+* Locks all DPIO ops that aren't innately lockless. Smarter selection
+ logic may allow locking to be relaxed eventually.
+* Portable QBMan driver source (and low-level MC flib code for DPIO) is
+ included and encapsulated within the DPIO driver.
+
+Signed-off-by: Geoff Thorpe <Geoff.Thorpe at freescale.com>
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Cristian Sovaiala <cristian.sovaiala at freescale.com>
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Jeffrey Ladouceur <Jeffrey.Ladouceur at freescale.com>
+[Stuart: resolved merge conflicts]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
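As a rough illustration of the notification service described above (a sketch, not code from this patch: only 'desired_cpu' is named in the logs below, and the other field names of struct dpaa2_io_notification_ctx are assumptions):

    static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
    {
        /* e.g. schedule NAPI processing, then re-arm the FQ */
    }

    static struct dpaa2_io_notification_ctx nctx = {
        .cb          = my_fqdan_cb,
        .desired_cpu = 0, /* registration fails if this cannot be honoured */
    };
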
+dpio: Use locks when querying fq state
+
+merged from patch in 3.19-bringup branch.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Jeffrey Ladouceur <Jeffrey.Ladouceur at freescale.com>
+Change-Id: Ia4d09f8a0cf4d8a4a2aa1cb39be789c34425286d
+Reviewed-on: http://git.am.freescale.net:8181/34707
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+qbman: Fix potential race in VDQCR handling
+
+Remove atomic_read() check of the VDQCR busy marker. These checks were racy
+as the flag could be incorrectly cleared if checked while another thread was
+starting a pull command. The check is unneeded since we can determine the
+owner of the outstanding pull command through other means.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: Icc64577c0a4ce6dadef208975e980adfc6796c86
+Reviewed-on: http://git.am.freescale.net:8181/34705
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio: Fix IRQ handler and remove useless spinlock
+
+The IRQ handler for a threaded IRQ requires two parts: initially the handler
+should check status and inhibit the IRQ, then the threaded portion should
+process and re-enable it.
+
+Also remove a spinlock that was redundant with the QMan driver, and a debug
+check that could trigger under a race condition.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Signed-off-by: Jeffrey Ladouceur <Jeffrey.Ladouceur at freescale.com>
+Change-Id: I64926583af0be954228de94ae354fa005c8ec88a
+Reviewed-on: http://git.am.freescale.net:8181/34706
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-mc: dpio: Implement polling if IRQ not available
+
+Temporarily add a polling mode to DPIO for the case where IRQ
+registration fails.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: Iebbd488fd14dd9878ef846e40f3ebcbcd0eb1e80
+Reviewed-on: http://git.am.freescale.net:8181/34775
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Jeffrey Ladouceur <Jeffrey.Ladouceur at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-mc-dpio: Fix to make this work without interrupt
+
+Some additional fixes to make the dpio driver work in poll mode.
+This is needed for direct assignment to a KVM guest.
+
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan at freescale.com>
+Change-Id: Icf66b8c0c7f7e1610118f78396534c067f594934
+Reviewed-on: http://git.am.freescale.net:8181/35333
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-mc-dpio: Make QBMan token tracking internal
+
+Previously the QBMan portal code required the caller to properly set and
+check a token value used by the driver to detect when the QMan
+hardware had completed a dequeue. This patch simplifies the driver
+interface by dealing with token values internally. The driver now
+sets the token value to 0 once it has dequeued a frame, while a token
+value of 1 indicates the HW has completed the dequeue but SW has not
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: If94d9728b0faa0fd79b47108f5cb05a425b89c18
+Reviewed-on: http://git.am.freescale.net:8181/35433
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-mc-dpio: Distribute DPIO IRQs among cores
+
+Configure the DPIO IRQ affinities across all available cores
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: Ib45968a070460b7e9410bfe6067b20ecd3524c54
+Reviewed-on: http://git.am.freescale.net:8181/35540
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio/qbman: add flush after finishing cena write
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I19537f101f7f5b443d60c0ad0e5d96c1dc302223
+Reviewed-on: http://git.am.freescale.net:8181/35854
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio/qbman: rename qbman_dq_entry to qbman_result
+
+Currently qbman_dq_entry is used both for dequeue results (in DQRR
+and memory) and for notifications (in DQRR and memory). It doesn't
+make sense to have dq_entry in the name of notifications
+that have nothing to do with dequeues. So we rename it to
+qbman_result, which is meaningful in both cases.
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I62b3e729c571a1195e8802a9fab3fca97a14eae4
+Reviewed-on: http://git.am.freescale.net:8181/35535
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio/qbman: add APIs to parse BPSCN and CGCU
+
+BPSCN and CGCU are notifications which can only be written to memory.
+We need to consider the host endianness while parsing these notification.
+Also modify the check of FQRN/CSCN_MEM with the same consideration.
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I572e0aa126107aed40e1ce326d5df7956882a939
+Reviewed-on: http://git.am.freescale.net:8181/35536
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio/qbman: remove EXPORT_SYMBOL for qbman APIs
+
+because they are only used by dpio.
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I12e7b81c2d32f3c7b3df9fd73b742b1b675f4b8b
+Reviewed-on: http://git.am.freescale.net:8181/35537
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio/qbman: add invalidate and prefetch support
+
+for cacheable memory access.
+Also remove the redundant memory barriers.
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I452a768278d1c5ef37e5741e9b011d725cb57b30
+Reviewed-on: http://git.am.freescale.net:8181/35873
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpio-driver: Fix qman-portal interrupt masking in poll mode
+
+The DPIO driver should mask qman-portal interrupt reporting when
+working in poll mode. The has_irq flag is used for this, but
+interrupt masking was happening before it was decided whether the
+system would work in poll mode or interrupt mode.
+
+This patch fixes the issue so that IRQ masking/enabling
+happens after the irq/poll mode is decided.
+
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan at freescale.com>
+Change-Id: I44de07b6142e80b3daea45e7d51a2d2799b2ed8d
+Reviewed-on: http://git.am.freescale.net:8181/37100
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+(cherry picked from commit 3579244250dcb287a0fe58bcc3b3780076d040a2)
+
+dpio: Add a function to query buffer pool depth
+
+Add a debug function that allows users to query the number
+of buffers in a specific buffer pool.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: Ie9a5f2e86d6a04ae61868bcc807121780c53cf6c
+Reviewed-on: http://git.am.freescale.net:8181/36069
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+(cherry picked from commit 3c749d860592f62f6b219232580ca35fd1075337)
+
+dpio: Use normal cacheable non-shareable memory for qbman cena
+
+QBMan SWP CENA portal memory requires the memory to be cacheable,
+and non-shareable.
+
+Signed-off-by: Haiying Wang <Haiying.Wang at freescale.com>
+Change-Id: I1c01cffe9ff2503fea2396d7cc761508f6e1ca85
+Reviewed-on: http://git.am.freescale.net:8181/35487
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+(cherry picked from commit 2a7e1ede7e155d9219006999893912e0b029ce4c)
+
+fsl-dpio: Process frames in IRQ context
+
+Stop using threaded IRQs and move back to hardirq top-halves.
+This is the first patch of a small series adapting the DPIO and Ethernet
+code to these changes.
+
+Signed-off-by: Roy Pledge <roy.pledge at freescale.com>
+Tested-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Tested-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+[Stuart: split out dpaa-eth part separately]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpio: Fast DPIO object selection
+
+The DPIO service code had a couple of problems with performance impact:
+ - The DPIO service object was protected by a global lock, within
+ functions called from the fast datapath on multiple CPUs.
+ - The DPIO service code would iterate unnecessarily through its linked
+ list, while most of the time it looks for CPU-bound objects.
+
+Add a fast-access array pointing to the same dpaa_io objects as the DPIO
+service's linked list, used in non-preemptible contexts.
+Avoid list access/reordering if a specific CPU was requested. This
+greatly limits contention on the global service lock.
+Make explicit calls for per-CPU DPIO service objects if the current
+context permits (which is the case on most of the Ethernet fastpath).
+
+These changes incidentally fix a functional problem, too: according to
+the specification of struct dpaa_io_notification_ctx, registration should
+fail if the specification of 'desired_cpu' cannot be observed. Instead,
+dpaa_io_service_register() would keep searching for non-affine DPIO
+objects, even when that was not requested.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I2dd78bc56179f97d3fd78052a653456e5f89ed82
+Reviewed-on: http://git.am.freescale.net:8181/37689
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
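The fast-access idea above, sketched in pseudo-kernel C (names are illustrative, not the patch's actual identifiers): a per-CPU array shadows the service's linked list so affine lookups avoid the list walk and the global lock entirely:

    static struct dpaa2_io *dpio_by_cpu[NR_CPUS];

    static struct dpaa2_io *service_select_affine(void)
    {
        /* safe without the service lock in non-preemptible contexts */
        return dpio_by_cpu[smp_processor_id()];
    }
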
+DPIO: Implement a missing lock in DPIO
+
+Implement missing DPIO service notification deregistration lock
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: Ida9a4d00cc3a66bc215c260a8df2b197366736f7
+Reviewed-on: http://git.am.freescale.net:8181/38497
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-mc: migrated dpio flibs for MC fw 8.0.0
+
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl_qbman: Ensure SDQCR is only enabled if a channel is selected
+
+QMan HW considers an SDQCR command that does not indicate any
+channels to dequeue from to be an error. This change ensures that
+a NULL command is set in the case where no channels are selected for dequeue.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+Change-Id: I8861304881885db00df4a29d760848990d706c70
+Reviewed-on: http://git.am.freescale.net:8181/38498
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Haiying Wang <Haiying.Wang at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+flib: dpio: Fix compiler warning.
+
+Gcc takes the credit here.
+To be merged with other fixes on this branch.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: If81f35ab3e8061aae1e03b72ab16a4c1dc390c3a
+Reviewed-on: http://git.am.freescale.net:8181/39148
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-mc: dpio: remove programing of MSIs in dpio driver
+
+this is now handled in the bus driver
+
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl_qbman: Enable CDAN generation
+
+Enable CDAN notification registration in both QBMan and DPIO.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl_dpio: Implement API to dequeue from a channel
+
+Implement an API that allows users to dequeue from a channel
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl-dpio: Change dequeue command type
+
+For now CDANs don't work with priority precedence.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpio: Export FQD context getter function
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl_dpio: Fix DPIO polling thread logic
+
+Fix the DPIO polling thread logic and ensure the thread
+is not parked.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+[Stuart: fixed typo in comment]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpio,qbman: Export functions
+
+A few of the functions used by the Ethernet driver were not exported
+yet. Needed in order to compile the Eth driver as a module.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl_qbman: Use proper accessors when reading QBMan portals
+
+Use accessors that properly byteswap when accessing QBMan portals
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl_qbman: Fix encoding of 64 byte values
+
+The QBMan driver encodes commands in 32-bit host endianness and then
+converts to little endian before sending to HW. This means 64
+byte values need to be encoded so that the values will be
+correctly swapped when the commands are written to HW.
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+dpaa_fd: Add functions for SG entries endianness conversions
+
+Scatter gather entries are little endian at the hardware level.
+Add functions for converting the SG entry structure to cpu
+endianness to avoid incorrect behaviour on BE kernels.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
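In the spirit of those helpers, a hedged sketch of reading one field of a little-endian hardware SG entry on a BE kernel (the simplified struct is illustrative; the real struct dpaa2_sg_entry lives in fsl_dpaa2_fd.h and differs):

    struct sg_entry_example {
        __le64 addr;
        __le32 len;
        /* format/bpid/offset bits omitted */
    };

    static u64 sg_entry_get_addr(const struct sg_entry_example *sg)
    {
        return le64_to_cpu(sg->addr); /* correct on LE and BE hosts */
    }
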
+fsl_dpaa: update header files with kernel-doc format
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+qbman: update header files to follow kernel-doc format
+
+Plus rename orp_id to opr_id based on the BG.
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl/dpio: rename ldpaa to dpaa2
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+(Stuart: removed eth part out into separate patch)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+qbman_test: update qbman_test
+
+- Update to sync with latest change in qbman driver.
+- Add bpscn test case
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl-dpio: add FLE (Frame List Entry) for FMT=dpaa_fd_list support
+
+Signed-off-by: Horia Geantă <horia.geanta at freescale.com>
+
+fsl-dpio: add accessors for FD[FRC]
+
+Signed-off-by: Horia Geantă <horia.geanta at freescale.com>
+
+fsl-dpio: add accessors for FD[FLC]
+
+Signed-off-by: Horia Geantă <horia.geanta at freescale.com>
+(Stuart: corrected typo in subject)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+fsl/dpio: dpaa2_fd: Add the comments for newly added APIs.
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+[Stuart: added fsl/dpio prefix on commit subject]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpio: rename dpaa_* structure to dpaa2_*
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+(Stuart: split eth and caam parts out into separate patches)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+fsl-dpio: update the header file with more description in comments
+
+plus fix some typos.
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl-dpio: fix Klocwork issues.
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl_dpio: Fix kernel doc issues and add an overview
+
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl-dpio,qbman: Prefer affine portal to acquire/release buffers
+
+The FQ enqueue/dequeue DPIO code attempts to select an affine QBMan
+portal in order to minimize contention (under the assumption that most
+of the calling code runs in affine contexts). Doing the same now for
+buffer acquire/release.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpio: prefer affine QBMan portal in dpaa2_io_service_enqueue_fq
+
+Commit 7b057d9bc3d31 ("fsl-dpio: Fast DPIO object selection")
+took care of dpaa2_io_service_enqueue_qd, missing
+dpaa2_io_service_enqueue_fq.
+
+Cc: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Signed-off-by: Horia Geantă <horia.geanta at freescale.com>
+
+fsl/dpio: update the dpio flib files from mc9.0.0 release
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl/dpio: pass qman_version from dpio attributes to swp desc
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl/dpio/qbman: Use qman version to determine dqrr size
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+fsl-dpio: Fix dequeue type enum values
+
+enum qbman_pull_type_e did not follow the volatile dequeue command
+specification, for which VERB=b'00 is a valid value (but of no
+interest to us).
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+fsl-dpio: Volatile dequeue with priority precedence
+
+Use priority precedence to do volatile dequeue from channels, rather
+than active FQ precedence.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/Kconfig | 16 +
+ drivers/staging/fsl-mc/bus/Makefile | 3 +
+ drivers/staging/fsl-mc/bus/dpio/Makefile | 9 +
+ drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 405 +++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 +
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 ++++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 +++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 ++++++++
+ drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 +++
+ drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 ++
+ drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 ++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++
+ drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 ++++++++++++++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 +++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 +++
+ drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 +++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++++++++
+ drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 +++++++++++++
+ drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 ++++++++++
+ 21 files changed, 8333 insertions(+)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c
+ create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h
+ create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h
+
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -28,3 +28,19 @@ config FSL_MC_RESTOOL
+ help
+ Driver that provides kernel support for the Freescale Management
+ Complex resource manager user-space tool.
++
++config FSL_MC_DPIO
++ tristate "Freescale Data Path I/O (DPIO) driver"
++ depends on FSL_MC_BUS
++ help
++ Driver for Freescale Data Path I/O (DPIO) devices.
++ A DPIO device provides queue and buffer management facilities
++ for software to interact with other Data Path devices. This
++ driver does not expose the DPIO device individually, but
++ groups them under a service layer API.
++
++config FSL_QBMAN_DEBUG
++ tristate "Freescale QBMAN Debug APIs"
++ depends on FSL_MC_DPIO
++ help
++ QBMan debug assistant APIs.
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -21,3 +21,6 @@ mc-bus-driver-objs := mc-bus.o \
+
+ # MC restool kernel support
+ obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
++
++# MC DPIO driver
++obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
+@@ -0,0 +1,9 @@
++#
++# Freescale DPIO driver
++#
++
++obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o
++
++fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o
++
++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c
+@@ -0,0 +1,405 @@
++/* Copyright 2014 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++
++#include "../../include/mc.h"
++#include "../../include/fsl_dpaa2_io.h"
++
++#include "fsl_qbman_portal.h"
++#include "fsl_dpio.h"
++#include "fsl_dpio_cmd.h"
++
++#include "dpio-drv.h"
++
++#define DPIO_DESCRIPTION "DPIO Driver"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION(DPIO_DESCRIPTION);
++
++#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */
++
++struct dpio_priv {
++ struct dpaa2_io *io;
++ char irq_name[MAX_DPIO_IRQ_NAME];
++ struct task_struct *thread;
++};
++
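++/* Polling fallback: a per-object kthread used when MSIs cannot be
++ * allocated or registered (see dpaa2_dpio_probe() below). It drains
++ * the portal via dpaa2_io_poll() on a fixed 50 ms cadence.
++ */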
++static int dpio_thread(void *data)
++{
++ struct dpaa2_io *io = data;
++
++ while (!kthread_should_stop()) {
++ int err = dpaa2_io_poll(io);
++
++ if (err) {
++ pr_err("dpaa2_io_poll() failed\n");
++ return err;
++ }
++ msleep(50);
++ }
++ return 0;
++}
++
++static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct dpio_priv *priv = dev_get_drvdata(dev);
++
++ return dpaa2_io_irq(priv->io);
++}
++
++static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev)
++{
++ int i;
++ struct fsl_mc_device_irq *irq;
++ int irq_count = ls_dev->obj_desc.irq_count;
++
++ for (i = 0; i < irq_count; i++) {
++ irq = ls_dev->irqs[i];
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
++ }
++}
++
++static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu)
++{
++ struct dpio_priv *priv;
++ unsigned int i;
++ int error;
++ struct fsl_mc_device_irq *irq;
++ unsigned int num_irq_handlers_registered = 0;
++ int irq_count = ls_dev->obj_desc.irq_count;
++ cpumask_t mask;
++
++ priv = dev_get_drvdata(&ls_dev->dev);
++
++ if (WARN_ON(irq_count != 1))
++ return -EINVAL;
++
++ for (i = 0; i < irq_count; i++) {
++ irq = ls_dev->irqs[i];
++ error = devm_request_irq(&ls_dev->dev,
++ irq->msi_desc->irq,
++ dpio_irq_handler,
++ 0,
++ priv->irq_name,
++ &ls_dev->dev);
++ if (error < 0) {
++ dev_err(&ls_dev->dev,
++ "devm_request_irq() failed: %d\n",
++ error);
++ goto error_unregister_irq_handlers;
++ }
++
++ /* Set the IRQ affinity */
++ cpumask_clear(&mask);
++ cpumask_set_cpu(cpu, &mask);
++ if (irq_set_affinity(irq->msi_desc->irq, &mask))
++ pr_err("irq_set_affinity failed irq %d cpu %d\n",
++ irq->msi_desc->irq, cpu);
++
++ num_irq_handlers_registered++;
++ }
++
++ return 0;
++
++error_unregister_irq_handlers:
++ for (i = 0; i < num_irq_handlers_registered; i++) {
++ irq = ls_dev->irqs[i];
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq,
++ &ls_dev->dev);
++ }
++
++ return error;
++}
++
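++/* Probe flow, in brief: allocate an MC portal, open and query the DPIO
++ * object, enable it, map its cache-enabled and cache-inhibited regions,
++ * attempt MSI setup (falling back to the polling kthread above), and
++ * finally attach the resulting dpaa2_io object to the default service.
++ */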
++static int __cold
++dpaa2_dpio_probe(struct fsl_mc_device *ls_dev)
++{
++ struct dpio_attr dpio_attrs;
++ struct dpaa2_io_desc desc;
++ struct dpio_priv *priv;
++ int err = -ENOMEM;
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_io *defservice;
++ bool irq_allocated = false;
++ static int next_cpu;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ goto err_priv_alloc;
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ err = -EPROBE_DEFER;
++ goto err_mcportal;
++ }
++
++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id,
++ &ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ &dpio_attrs);
++ if (err) {
++ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
++ goto err_get_attr;
++ }
++ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_enable() failed %d\n", err);
++ goto err_get_attr;
++ }
++ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n",
++ ls_dev->regions[0].start,
++ ls_dev->regions[1].start,
++ dpio_attrs.qbman_portal_id,
++ dpio_attrs.num_priorities);
++
++ pr_info("ce_size=0x%llx, ci_size=0x%llx\n",
++ resource_size(&ls_dev->regions[0]),
++ resource_size(&ls_dev->regions[1]));
++
++ desc.qman_version = dpio_attrs.qbman_version;
++ /* Build DPIO driver object out of raw MC object */
++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
++ desc.has_irq = 1;
++ desc.will_poll = 1;
++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
++ desc.cpu = next_cpu;
++ /* TODO: figure out how to determine this setting - will we ever
++  * have non-affine portals where we stash to a platform cache?
++  */
++ desc.stash_affinity = 1;
++ next_cpu = (next_cpu + 1) % num_active_cpus();
++ desc.dpio_id = ls_dev->obj_desc.id;
++ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start,
++ resource_size(&ls_dev->regions[0]));
++ desc.regs_cinh = ioremap(ls_dev->regions[1].start,
++ resource_size(&ls_dev->regions[1]));
++
++ err = fsl_mc_allocate_irqs(ls_dev);
++ if (err) {
++ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n");
++ desc.has_irq = 0;
++ } else {
++ irq_allocated = true;
++
++ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d",
++ desc.dpio_id);
++
++ err = register_dpio_irq_handlers(ls_dev, desc.cpu);
++ if (err)
++ desc.has_irq = 0;
++ }
++
++ priv->io = dpaa2_io_create(&desc);
++ if (!priv->io) {
++ dev_err(dev, "DPIO setup failed\n");
++ goto err_dpaa2_io_create;
++ }
++
++ /* If no IRQ, fall back to poll mode */
++ if (desc.has_irq == 0) {
++ dev_info(dev, "Using polling mode for DPIO %d\n",
++ desc.dpio_id);
++ /* goto err_register_dpio_irq; */
++ /* TEMP: start polling if the IRQ could not be registered.
++  * This will go away once KVM support for MSI is present.
++  */
++ if (irq_allocated)
++ fsl_mc_free_irqs(ls_dev);
++
++ if (desc.stash_affinity)
++ priv->thread = kthread_create_on_cpu(dpio_thread,
++ priv->io,
++ desc.cpu,
++ "dpio_aff%u");
++ else
++ priv->thread =
++ kthread_create(dpio_thread,
++ priv->io,
++ "dpio_non%u",
++ dpio_attrs.qbman_portal_id);
++ if (IS_ERR(priv->thread)) {
++ dev_err(dev, "DPIO thread failure\n");
++ err = PTR_ERR(priv->thread);
++ goto err_dpaa_thread;
++ }
++ kthread_unpark(priv->thread);
++ wake_up_process(priv->thread);
++ }
++
++ defservice = dpaa2_io_default_service();
++ err = dpaa2_io_service_add(defservice, priv->io);
++ dpaa2_io_down(defservice);
++ if (err) {
++ dev_err(dev, "DPIO add-to-service failed\n");
++ goto err_dpaa2_io_add;
++ }
++
++ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id);
++ dev_info(dev, " receives_notifications = %d\n",
++ desc.receives_notifications);
++ dev_info(dev, " has_irq = %d\n", desc.has_irq);
++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle);
++ fsl_mc_portal_free(ls_dev->mc_io);
++ return 0;
++
++err_dpaa2_io_add:
++ unregister_dpio_irq_handlers(ls_dev);
++/* TEMP: To be restored once polling is removed
++ err_register_dpio_irq:
++ fsl_mc_free_irqs(ls_dev);
++*/
++err_dpaa_thread:
++err_dpaa2_io_create:
++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle);
++err_get_attr:
++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle);
++err_open:
++ fsl_mc_portal_free(ls_dev->mc_io);
++err_mcportal:
++ dev_set_drvdata(dev, NULL);
++ devm_kfree(dev, priv);
++err_priv_alloc:
++ return err;
++}
++
++/*
++ * Tear down interrupts for a given DPIO object
++ */
++static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev)
++{
++ /* (void)disable_dpio_irqs(ls_dev); */
++ unregister_dpio_irq_handlers(ls_dev);
++ fsl_mc_free_irqs(ls_dev);
++}
++
++static int __cold
++dpaa2_dpio_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct dpio_priv *priv;
++ int err;
++
++ dev = &ls_dev->dev;
++ priv = dev_get_drvdata(dev);
++
++ /* There is no implementation yet for pulling a DPIO object out of a
++ * running service (and they're currently always running).
++ */
++ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n");
++
++ if (priv->thread)
++ kthread_stop(priv->thread);
++ else
++ dpio_teardown_irqs(ls_dev);
++
++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_mcportal;
++ }
++
++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id,
++ &ls_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ dev_set_drvdata(dev, NULL);
++ dpaa2_io_down(priv->io);
++
++ err = 0;
++
++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle);
++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle);
++err_open:
++ fsl_mc_portal_free(ls_dev->mc_io);
++err_mcportal:
++ return err;
++}
++
++static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpio",
++ .ver_major = DPIO_VER_MAJOR,
++ .ver_minor = DPIO_VER_MINOR
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_dpio_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_dpio_probe,
++ .remove = dpaa2_dpio_remove,
++ .match_id_table = dpaa2_dpio_match_id_table
++};
++
++static int dpio_driver_init(void)
++{
++ int err;
++
++ err = dpaa2_io_service_driver_init();
++ if (!err) {
++ err = fsl_mc_driver_register(&dpaa2_dpio_driver);
++ if (err)
++ dpaa2_io_service_driver_exit();
++ }
++ return err;
++}
++static void dpio_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
++ dpaa2_io_service_driver_exit();
++}
++module_init(dpio_driver_init);
++module_exit(dpio_driver_exit);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h
+@@ -0,0 +1,33 @@
++/* Copyright 2014 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++int dpaa2_io_service_driver_init(void);
++void dpaa2_io_service_driver_exit(void);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+@@ -0,0 +1,468 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../include/mc-sys.h"
++#include "../../include/mc-cmd.h"
++#include "fsl_dpio.h"
++#include "fsl_dpio_cmd.h"
++
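++/* Every wrapper below follows the same three-step MC command pattern:
++ * encode a command header (plus any input arguments) into a struct
++ * mc_command, hand it to mc_send_command(), and - for commands that
++ * return data - decode the response fields out of the same structure.
++ * A minimal hypothetical wrapper with no inputs or outputs would be:
++ *
++ *	int dpio_frob(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
++ *		      uint16_t token)
++ *	{
++ *		struct mc_command cmd = { 0 };
++ *
++ *		cmd.header = mc_encode_cmd_header(DPIO_CMDID_FROB,
++ *						  cmd_flags, token);
++ *		return mc_send_command(mc_io, &cmd);
++ *	}
++ *
++ * (dpio_frob()/DPIO_CMDID_FROB are illustrative names only, not part
++ * of this patch.)
++ */
++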
++int dpio_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpio_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPIO_CMD_OPEN(cmd, dpio_id);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpio_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpio_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPIO_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpio_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpio_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpio_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpio_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpio_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpio_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t sdest)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_STASHING_DEST(cmd, sdest);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t *sdest)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
++ cmd_flags,
++ token);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest);
++
++ return 0;
++}
++
++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ uint8_t *channel_index)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
++ cmd_flags,
++ token);
++ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
++
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
++
++ return 0;
++}
++
++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
++ cmd_flags,
++ token);
++ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
++
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c
+@@ -0,0 +1,801 @@
++/* Copyright 2014 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/types.h>
++#include "fsl_qbman_portal.h"
++#include "../../include/mc.h"
++#include "../../include/fsl_dpaa2_io.h"
++#include "fsl_dpio.h"
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "dpio-drv.h"
++#include "qbman_debug.h"
++
++#define UNIMPLEMENTED() pr_err("%s: unimplemented!\n", __func__)
++
++#define MAGIC_SERVICE 0xabcd9876
++#define MAGIC_OBJECT 0x1234fedc
++
++struct dpaa2_io {
++ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part
++ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If
++ * it's neither, something got corrupted. This is mainly to satisfy
++ * dpaa2_io_from_registration(), which dereferences a caller-
++ * instantiated struct and so warrants a bug-checking step - hence the
++ * magic rather than a boolean.
++ */
++ unsigned int magic;
++ atomic_t refs;
++ union {
++ struct dpaa2_io_service {
++ spinlock_t lock;
++ struct list_head list;
++ /* for targeted dpaa2_io selection */
++ struct dpaa2_io *objects_by_cpu[NR_CPUS];
++ cpumask_t cpus_notifications;
++ cpumask_t cpus_stashing;
++ int has_nonaffine;
++ /* Slight hack: record the special case of the
++ * "default service", because that's the case where we
++ * need to avoid a kfree() ... */
++ int is_defservice;
++ } service;
++ struct dpaa2_io_object {
++ struct dpaa2_io_desc dpio_desc;
++ struct qbman_swp_desc swp_desc;
++ struct qbman_swp *swp;
++ /* If the object is part of a service, this is it (and
++ * 'node' is linked into the service's list) */
++ struct dpaa2_io *service;
++ struct list_head node;
++ /* Interrupt mask, as used with
++ * qbman_swp_interrupt_[gs]et_vanish(). This isn't
++ * locked, because the higher layer is driving all
++ * "ingress" processing. */
++ uint32_t irq_mask;
++ /* As part of simplifying assumptions, we provide an
++ * irq-safe lock for each type of DPIO operation that
++ * isn't innately lockless. The selection algorithms
++ * (which are simplified) require this, whereas
++ * eventually adherence to cpu-affinity will presumably
++ * relax the locking requirements. */
++ spinlock_t lock_mgmt_cmd;
++ spinlock_t lock_notifications;
++ struct list_head notifications;
++ } object;
++ };
++};
++
++struct dpaa2_io_store {
++ unsigned int max;
++ dma_addr_t paddr;
++ struct dpaa2_dq *vaddr;
++ void *alloced_addr; /* the actual return from kmalloc as it may
++ be adjusted for alignment purposes */
++ unsigned int idx; /* position of the next-to-be-returned entry */
++ struct qbman_swp *swp; /* portal used to issue VDQCR */
++ struct device *dev; /* device used for DMA mapping */
++};
++
++static struct dpaa2_io def_serv;
++
++/**********************/
++/* Internal functions */
++/**********************/
++
++static void service_init(struct dpaa2_io *d, int is_defservice)
++{
++ struct dpaa2_io_service *s = &d->service;
++
++ d->magic = MAGIC_SERVICE;
++ atomic_set(&d->refs, 1);
++ spin_lock_init(&s->lock);
++ INIT_LIST_HEAD(&s->list);
++ cpumask_clear(&s->cpus_notifications);
++ cpumask_clear(&s->cpus_stashing);
++ s->has_nonaffine = 0;
++ s->is_defservice = is_defservice;
++}
++
++/* Selection algorithms, stupid ones at that. These are to handle the case where
++ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within
++ * it to use.
++ */
++static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss,
++ int cpu)
++{
++ struct dpaa2_io *o;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&ss->lock, irqflags);
++ /* TODO: this is about the dumbest and slowest selection algorithm you
++ * could imagine. (We're looking for something working first, and
++ * something efficient second...)
++ */
++ list_for_each_entry(o, &ss->list, object.node)
++ if (o->object.dpio_desc.cpu == cpu)
++ goto found;
++
++ /* No joy. Try the first nonaffine portal (bleurgh) */
++ if (ss->has_nonaffine)
++ list_for_each_entry(o, &ss->list, object.node)
++ if (!o->object.dpio_desc.stash_affinity)
++ goto found;
++
++ /* No joy. Try the first object. Told you it was horrible. */
++ if (!list_empty(&ss->list))
++ o = list_entry(ss->list.next, struct dpaa2_io, object.node);
++ else
++ o = NULL;
++
++found:
++ spin_unlock_irqrestore(&ss->lock, irqflags);
++ return o;
++}
++
++static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu)
++{
++ struct dpaa2_io_service *ss;
++ unsigned long irqflags;
++
++ if (!d)
++ d = &def_serv;
++ else if (d->magic == MAGIC_OBJECT)
++ return d;
++ BUG_ON(d->magic != MAGIC_SERVICE);
++
++ ss = &d->service;
++
++ /* If cpu==-1, choose the current cpu, with no guarantees about
++ * potentially being migrated away.
++ */
++ if (unlikely(cpu < 0)) {
++ spin_lock_irqsave(&ss->lock, irqflags);
++ cpu = smp_processor_id();
++ spin_unlock_irqrestore(&ss->lock, irqflags);
++
++ return _service_select_by_cpu_slow(ss, cpu);
++ }
++
++ /* If a specific cpu was requested, pick it up immediately */
++ return ss->objects_by_cpu[cpu];
++}
++
++static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d)
++{
++ struct dpaa2_io_service *ss;
++ struct dpaa2_io *o;
++ unsigned long irqflags;
++
++ if (!d)
++ d = &def_serv;
++ else if (d->magic == MAGIC_OBJECT)
++ return d;
++ BUG_ON(d->magic != MAGIC_SERVICE);
++
++ /*
++ * Lock the service, looking for the first DPIO object in the list,
++ * ignore everything else about that DPIO, and choose it to do the
++ * operation! As a post-selection step, move the DPIO to the end of
++ * the list. It should improve load-balancing a little, although it
++ * might also incur a performance hit, given that the lock is *global*
++ * and this may be called on the fast-path...
++ */
++ ss = &d->service;
++ spin_lock_irqsave(&ss->lock, irqflags);
++ if (!list_empty(&ss->list)) {
++ o = list_entry(ss->list.next, struct dpaa2_io, object.node);
++ list_del(&o->object.node);
++ list_add_tail(&o->object.node, &ss->list);
++ } else
++ o = NULL;
++ spin_unlock_irqrestore(&ss->lock, irqflags);
++ return o;
++}
++
++/* If the context is not preemptible, select the service affine to the
++ * current cpu. Otherwise, "select any".
++ */
++static inline struct dpaa2_io *_service_select(struct dpaa2_io *d)
++{
++ struct dpaa2_io *temp = d;
++
++ if (likely(!preemptible())) {
++ d = service_select_by_cpu(d, smp_processor_id());
++ if (likely(d))
++ return d;
++ }
++ return service_select_any(temp);
++}
++
++/**********************/
++/* Exported functions */
++/**********************/
++
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++{
++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++ struct dpaa2_io_object *o;
++
++ if (!ret)
++ return NULL;
++ o = &ret->object;
++ ret->magic = MAGIC_OBJECT;
++ atomic_set(&ret->refs, 1);
++ o->dpio_desc = *desc;
++ o->swp_desc.cena_bar = o->dpio_desc.regs_cena;
++ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh;
++ o->swp_desc.qman_version = o->dpio_desc.qman_version;
++ o->swp = qbman_swp_init(&o->swp_desc);
++ o->service = NULL;
++ if (!o->swp) {
++ kfree(ret);
++ return NULL;
++ }
++ INIT_LIST_HEAD(&o->node);
++ spin_lock_init(&o->lock_mgmt_cmd);
++ spin_lock_init(&o->lock_notifications);
++ INIT_LIST_HEAD(&o->notifications);
++ if (!o->dpio_desc.has_irq)
++ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff);
++ else {
++ /* For now only enable DQRR interrupts */
++ qbman_swp_interrupt_set_trigger(o->swp,
++ QBMAN_SWP_INTERRUPT_DQRI);
++ }
++ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff);
++ if (o->dpio_desc.receives_notifications)
++ qbman_swp_push_set(o->swp, 0, 1);
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_create);
++
++struct dpaa2_io *dpaa2_io_create_service(void)
++{
++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++
++ if (ret)
++ service_init(ret, 0);
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_create_service);
++
++struct dpaa2_io *dpaa2_io_default_service(void)
++{
++ atomic_inc(&def_serv.refs);
++ return &def_serv;
++}
++EXPORT_SYMBOL(dpaa2_io_default_service);
++
++void dpaa2_io_down(struct dpaa2_io *d)
++{
++ if (!atomic_dec_and_test(&d->refs))
++ return;
++ if (d->magic == MAGIC_SERVICE) {
++ BUG_ON(!list_empty(&d->service.list));
++ if (d->service.is_defservice)
++ /* avoid the kfree()! */
++ return;
++ } else {
++ BUG_ON(d->magic != MAGIC_OBJECT);
++ BUG_ON(d->object.service);
++ BUG_ON(!list_empty(&d->object.notifications));
++ }
++ kfree(d);
++}
++EXPORT_SYMBOL(dpaa2_io_down);
++
++int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o)
++{
++ struct dpaa2_io_service *ss = &s->service;
++ struct dpaa2_io_object *oo = &o->object;
++ int res = -EINVAL;
++
++ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT))
++ return res;
++ atomic_inc(&o->refs);
++ atomic_inc(&s->refs);
++ spin_lock(&ss->lock);
++ /* 'obj' must not already be associated with a service */
++ if (!oo->service) {
++ oo->service = s;
++ list_add(&oo->node, &ss->list);
++ if (oo->dpio_desc.receives_notifications) {
++ cpumask_set_cpu(oo->dpio_desc.cpu,
++ &ss->cpus_notifications);
++ /* Update the fast-access array */
++ ss->objects_by_cpu[oo->dpio_desc.cpu] =
++ container_of(oo, struct dpaa2_io, object);
++ }
++ if (oo->dpio_desc.stash_affinity)
++ cpumask_set_cpu(oo->dpio_desc.cpu,
++ &ss->cpus_stashing);
++ if (!oo->dpio_desc.stash_affinity)
++ ss->has_nonaffine = 1;
++ /* success */
++ res = 0;
++ }
++ spin_unlock(&ss->lock);
++ if (res) {
++ dpaa2_io_down(s);
++ dpaa2_io_down(o);
++ }
++ return res;
++}
++EXPORT_SYMBOL(dpaa2_io_service_add);
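++
++/* Usage sketch (illustrative, not part of this patch): a caller that
++ * wants its own grouping rather than the default service could do:
++ *
++ *	struct dpaa2_io *svc = dpaa2_io_create_service();
++ *	struct dpaa2_io *obj = dpaa2_io_create(&desc);
++ *
++ *	if (svc && obj && !dpaa2_io_service_add(svc, obj))
++ *		...enqueue/dequeue calls may now pass 'svc'...
++ *
++ * where 'desc' is a populated struct dpaa2_io_desc. On success,
++ * dpaa2_io_service_add() holds a reference on both objects; these are
++ * dropped again through dpaa2_io_down().
++ */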
++
++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc)
++{
++ if (obj->magic == MAGIC_SERVICE)
++ return -EINVAL;
++ BUG_ON(obj->magic != MAGIC_OBJECT);
++ *desc = obj->object.dpio_desc;
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_get_descriptor);
++
++#define DPAA_POLL_MAX 32
++
++int dpaa2_io_poll(struct dpaa2_io *obj)
++{
++ const struct dpaa2_dq *dq;
++ struct qbman_swp *swp;
++ int max = 0;
++
++ if (obj->magic != MAGIC_OBJECT)
++ return -EINVAL;
++ swp = obj->object.swp;
++ dq = qbman_swp_dqrr_next(swp);
++ while (dq) {
++ if (qbman_result_is_SCN(dq)) {
++ struct dpaa2_io_notification_ctx *ctx;
++ uint64_t q64;
++
++ q64 = qbman_result_SCN_ctx(dq);
++ ctx = (void *)q64;
++ ctx->cb(ctx);
++ } else
++ pr_crit("Unrecognised/ignored DQRR entry\n");
++ qbman_swp_dqrr_consume(swp, dq);
++ ++max;
++ if (max > DPAA_POLL_MAX)
++ return 0;
++ dq = qbman_swp_dqrr_next(swp);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_poll);
++
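++/* Interrupt-driven path: called from the MSI handler registered in
++ * dpio-drv.c (dpio_irq_handler()). It reads the portal interrupt
++ * status, drains the DQRR via dpaa2_io_poll(), then acknowledges the
++ * status bits and un-inhibits the interrupt.
++ */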
++int dpaa2_io_irq(struct dpaa2_io *obj)
++{
++ struct qbman_swp *swp;
++ uint32_t status;
++
++ if (obj->magic != MAGIC_OBJECT)
++ return -EINVAL;
++ swp = obj->object.swp;
++ status = qbman_swp_interrupt_read_status(swp);
++ if (!status)
++ return IRQ_NONE;
++ dpaa2_io_poll(obj);
++ qbman_swp_interrupt_clear_status(swp, status);
++ qbman_swp_interrupt_set_inhibit(swp, 0);
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(dpaa2_io_irq);
++
++int dpaa2_io_pause_poll(struct dpaa2_io *obj)
++{
++ UNIMPLEMENTED();
++ return -EINVAL;
++}
++EXPORT_SYMBOL(dpaa2_io_pause_poll);
++
++int dpaa2_io_resume_poll(struct dpaa2_io *obj)
++{
++ UNIMPLEMENTED();
++ return -EINVAL;
++}
++EXPORT_SYMBOL(dpaa2_io_resume_poll);
++
++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask)
++{
++ struct dpaa2_io_service *ss = &s->service;
++
++ BUG_ON(s->magic != MAGIC_SERVICE);
++ cpumask_copy(mask, &ss->cpus_notifications);
++}
++EXPORT_SYMBOL(dpaa2_io_service_notifications);
++
++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask)
++{
++ struct dpaa2_io_service *ss = &s->service;
++
++ BUG_ON(s->magic != MAGIC_SERVICE);
++ cpumask_copy(mask, &ss->cpus_stashing);
++}
++EXPORT_SYMBOL(dpaa2_io_service_stashing);
++
++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s)
++{
++ struct dpaa2_io_service *ss = &s->service;
++
++ BUG_ON(s->magic != MAGIC_SERVICE);
++ return ss->has_nonaffine;
++}
++EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine);
++
++int dpaa2_io_service_register(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++ if (!d)
++ return -ENODEV;
++ ctx->dpio_id = d->object.dpio_desc.dpio_id;
++ ctx->qman64 = (uint64_t)ctx;
++ ctx->dpio_private = d;
++ spin_lock_irqsave(&d->object.lock_notifications, irqflags);
++ list_add(&ctx->node, &d->object.notifications);
++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags);
++ if (ctx->is_cdan)
++ /* Enable the generation of CDAN notifications */
++ qbman_swp_CDAN_set_context_enable(d->object.swp,
++ (uint16_t)ctx->id,
++ ctx->qman64);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_service_register);
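++
++/* Registration sketch (illustrative): a DPAA2 driver wanting FQDAN
++ * callbacks for a frame queue fills in a notification context and
++ * registers it, e.g.
++ *
++ *	static void my_cb(struct dpaa2_io_notification_ctx *ctx);
++ *
++ *	ctx->cb = my_cb;
++ *	ctx->is_cdan = 0;
++ *	ctx->id = fqid;
++ *	ctx->desired_cpu = -1;
++ *	err = dpaa2_io_service_register(NULL, ctx);
++ *
++ * ('my_cb' and 'fqid' are placeholders.) The callback later fires from
++ * dpaa2_io_poll() when a matching notification lands in the DQRR.
++ */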
++
++int dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_io *d = ctx->dpio_private;
++ unsigned long irqflags;
++
++ if (!service)
++ service = &def_serv;
++ BUG_ON((service != d) && (service != d->object.service));
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_disable(d->object.swp,
++ (uint16_t)ctx->id);
++ spin_lock_irqsave(&d->object.lock_notifications, irqflags);
++ list_del(&ctx->node);
++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_service_deregister);
++
++int dpaa2_io_service_rearm(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags);
++ if (ctx->is_cdan)
++ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id);
++ else
++ err = qbman_swp_fq_schedule(d->object.swp, ctx->id);
++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags);
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_rearm);
++
++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx,
++ struct dpaa2_io **io)
++{
++ struct dpaa2_io_notification_ctx *tmp;
++ struct dpaa2_io *d = ctx->dpio_private;
++ unsigned long irqflags;
++ int ret = 0;
++
++ BUG_ON(d->magic != MAGIC_OBJECT);
++ /* Iterate the notifications associated with 'd' looking for a match.
++ * If none is found, we've been passed an unregistered ctx!
++ */
++ spin_lock_irqsave(&d->object.lock_notifications, irqflags);
++ list_for_each_entry(tmp, &d->object.notifications, node)
++ if (tmp == ctx)
++ goto found;
++ ret = -EINVAL;
++found:
++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags);
++ if (!ret) {
++ atomic_inc(&d->refs);
++ *io = d;
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_from_registration);
++
++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu,
++ struct dpaa2_io **ret)
++{
++ if (cpu == -1)
++ *ret = service_select_any(service);
++ else
++ *ret = service_select_by_cpu(service, cpu);
++ if (*ret) {
++ atomic_inc(&(*ret)->refs);
++ return 0;
++ }
++ return -ENODEV;
++}
++EXPORT_SYMBOL(dpaa2_io_service_get_persistent);
++
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max);
++ qbman_pull_desc_set_fq(&pd, fqid);
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ s->swp = d->object.swp;
++ err = qbman_swp_pull(d->object.swp, &pd);
++ if (err)
++ s->swp = NULL;
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
++
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max);
++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ s->swp = d->object.swp;
++ err = qbman_swp_pull(d->object.swp, &pd);
++ if (err)
++ s->swp = NULL;
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
++
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
++ uint32_t fqid,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_fq(&ed, fqid);
++ return qbman_swp_enqueue(d->object.swp, &ed,
++ (const struct qbman_fd *)fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
++
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
++ uint32_t qdid, uint8_t prio, uint16_t qdbin,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++ return qbman_swp_enqueue(d->object.swp, &ed,
++ (const struct qbman_fd *)fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
++
++int dpaa2_io_service_release(struct dpaa2_io *d,
++ uint32_t bpid,
++ const uint64_t *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_release_desc rd;
++
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_release_desc_clear(&rd);
++ qbman_release_desc_set_bpid(&rd, bpid);
++ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers);
++}
++EXPORT_SYMBOL(dpaa2_io_service_release);
++
++int dpaa2_io_service_acquire(struct dpaa2_io *d,
++ uint32_t bpid,
++ uint64_t *buffers,
++ unsigned int num_buffers)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = _service_select(d);
++ if (!d)
++ return -ENODEV;
++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags);
++ err = qbman_swp_acquire(d->object.swp, bpid, buffers, num_buffers);
++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags);
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_acquire);
++
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev)
++{
++ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++ size_t size;
++
++ BUG_ON(!max_frames || (max_frames > 16));
++ if (!ret)
++ return NULL;
++ ret->max = max_frames;
++ size = max_frames * sizeof(struct dpaa2_dq) + 64;
++ ret->alloced_addr = kmalloc(size, GFP_KERNEL);
++ if (!ret->alloced_addr) {
++ kfree(ret);
++ return NULL;
++ }
++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
++ ret->paddr = dma_map_single(dev, ret->vaddr,
++ sizeof(struct dpaa2_dq) * max_frames,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, ret->paddr)) {
++ kfree(ret->alloced_addr);
++ kfree(ret);
++ return NULL;
++ }
++ ret->idx = 0;
++ ret->dev = dev;
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_create);
++
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
++{
++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
++ DMA_FROM_DEVICE);
++ kfree(s->alloced_addr);
++ kfree(s);
++}
++EXPORT_SYMBOL(dpaa2_io_store_destroy);
++
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
++{
++ int match;
++ struct dpaa2_dq *ret = &s->vaddr[s->idx];
++
++ match = qbman_result_has_new_result(s->swp, ret);
++ if (!match) {
++ *is_last = 0;
++ return NULL;
++ }
++ BUG_ON(!qbman_result_is_DQ(ret));
++ s->idx++;
++ if (dpaa2_dq_is_pull_complete(ret)) {
++ *is_last = 1;
++ s->idx = 0;
++ /* If we get an empty dequeue result to terminate a zero-results
++ * vdqcr, return NULL to the caller rather than expecting the
++ * caller to check non-NULL results every time.
++ */
++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
++ ret = NULL;
++ } else
++ *is_last = 0;
++ return ret;
++}
++EXPORT_SYMBOL(dpaa2_io_store_next);
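++
++/* Dequeue-store sketch (illustrative): a pull-mode consumer creates a
++ * store, issues a pull command against it, then walks the results:
++ *
++ *	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
++ *	int last = 0;
++ *
++ *	if (!s || dpaa2_io_service_pull_fq(NULL, fqid, s))
++ *		goto bail;
++ *	while (!last) {
++ *		struct dpaa2_dq *dq = dpaa2_io_store_next(s, &last);
++ *
++ *		if (dq)
++ *			consume(dq);
++ *	}
++ *	dpaa2_io_store_destroy(s);
++ *
++ * ('consume', 'fqid' and 'bail' are placeholders; a NULL return with
++ * !last means hardware has not produced the next entry yet, so real
++ * callers should bound or yield in that loop.)
++ */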
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ uint32_t *fcnt, uint32_t *bcnt)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select_any(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->object.swp;
++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags);
++ ret = qbman_fq_query_state(swp, fqid, &state);
++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *fcnt = qbman_fq_state_frame_count(&state);
++ *bcnt = qbman_fq_state_byte_count(&state);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_fq_count);
++
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
++ uint32_t *num)
++{
++ struct qbman_attr state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select_any(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->object.swp;
++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags);
++ ret = qbman_bp_query(swp, bpid, &state);
++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *num = qbman_bp_info_num_free_bufs(&state);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_bp_count);
++
++#endif
++
++/* module init/exit hooks called from dpio-drv.c. These are declared in
++ * dpio-drv.h.
++ */
++int dpaa2_io_service_driver_init(void)
++{
++ service_init(&def_serv, 1);
++ return 0;
++}
++
++void dpaa2_io_service_driver_exit(void)
++{
++ if (atomic_read(&def_serv.refs) != 1)
++ pr_err("default DPIO service leaves dangling DPIO objects!\n");
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h
+@@ -0,0 +1,460 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPIO_H
++#define __FSL_DPIO_H
++
++/* Data Path I/O Portal API
++ * Contains initialization APIs and runtime control APIs for DPIO
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpio_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpio_id: DPIO unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpio_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpio_id,
++ uint16_t *token);
++
++/**
++ * dpio_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
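++
++/*
++ * Typical control-session flow (illustrative): open the object to get
++ * an authentication token, drive the object through that token, then
++ * close the session:
++ *
++ *	uint16_t token;
++ *
++ *	err = dpio_open(mc_io, 0, dpio_id, &token);
++ *	if (!err)
++ *		err = dpio_enable(mc_io, 0, token);
++ *	...
++ *	dpio_disable(mc_io, 0, token);
++ *	dpio_close(mc_io, 0, token);
++ *
++ * (Error handling is elided; 'mc_io' and 'dpio_id' are caller-supplied.)
++ */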
++
++/**
++ * enum dpio_channel_mode - DPIO notification channel mode
++ * @DPIO_NO_CHANNEL: No support for notification channel
++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
++ * dedicated channel in the DPIO; user should point the queue's
++ * destination in the relevant interface to this DPIO
++ */
++enum dpio_channel_mode {
++ DPIO_NO_CHANNEL = 0,
++ DPIO_LOCAL_CHANNEL = 1,
++};
++
++/**
++ * struct dpio_cfg - Structure representing DPIO configuration
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ */
++struct dpio_cfg {
++ enum dpio_channel_mode channel_mode;
++ uint8_t num_priorities;
++};
++
++/**
++ * dpio_create() - Create the DPIO object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPIO object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpio_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpio_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpio_destroy() - Destroy the DPIO object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_is_enabled() - Check if the DPIO is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpio_reset() - Reset the DPIO, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_set_stashing_destination() - Set the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: Stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t sdest);
++
++/**
++ * dpio_get_stashing_destination() - Get the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: Returns the stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t *sdest);
++
++/**
++ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ * @channel_index: Returned channel index to be used in qbman API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ uint8_t *channel_index);
++
++/**
++ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id);
++
++/**
++ * DPIO IRQ Index and Events
++ */
++
++/**
++ * IRQ software-portal index
++ */
++#define DPIO_IRQ_SWP_INDEX 0
++
++/**
++ * struct dpio_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user-defined number associated with this IRQ
++ */
++struct dpio_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
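++
++/*
++ * Example (illustrative): route the software-portal interrupt to a
++ * message-based interrupt whose address/data pair was obtained from
++ * the platform's MSI setup:
++ *
++ *	struct dpio_irq_cfg cfg = {
++ *		.addr = msi_addr,
++ *		.val = msi_data,
++ *		.irq_num = 0,
++ *	};
++ *
++ *	err = dpio_set_irq(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, &cfg);
++ *
++ * ('msi_addr' and 'msi_data' are placeholders for values supplied by
++ * the MSI domain.)
++ */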
++
++/**
++ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_get_irq() - Get IRQ information from the DPIO.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, none of its causes
++ * can trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpio_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpio_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpio_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpio_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpio_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpio_attr - Structure representing DPIO attributes
++ * @id: DPIO object ID
++ * @version: DPIO version
++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
++ * @qbman_portal_id: Software portal ID
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * @qbman_version: QBMAN version
++ */
++struct dpio_attr {
++ int id;
++ /**
++ * struct version - DPIO version
++ * @major: DPIO major version
++ * @minor: DPIO minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t qbman_portal_ce_offset;
++ uint64_t qbman_portal_ci_offset;
++ uint16_t qbman_portal_id;
++ enum dpio_channel_mode channel_mode;
++ uint8_t num_priorities;
++ uint32_t qbman_version;
++};
++
++/**
++ * dpio_get_attributes() - Retrieve DPIO attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpio_attr *attr);
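++
++/*
++ * Illustrative usage sketch (not part of the API above): wiring up the DPIO
++ * software-portal interrupt with the calls declared in this header. 'mc_io'
++ * and 'token' are assumed to have been obtained beforehand, e.g. via
++ * dpio_open():
++ *
++ *	struct dpio_attr attr;
++ *	uint32_t status;
++ *
++ *	if (!dpio_get_attributes(mc_io, 0, token, &attr)) {
++ *		dpio_set_irq_mask(mc_io, 0, token, DPIO_IRQ_SWP_INDEX,
++ *				  0xffffffff);
++ *		dpio_set_irq_enable(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 1);
++ *	}
++ *
++ * In the interrupt handler, read the pending causes and clear them (W1C):
++ *
++ *	dpio_get_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, &status);
++ *	dpio_clear_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, status);
++ */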
++#endif /* __FSL_DPIO_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h
+@@ -0,0 +1,184 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPIO_CMD_H
++#define _FSL_DPIO_CMD_H
++
++/* DPIO Version */
++#define DPIO_VER_MAJOR 3
++#define DPIO_VER_MINOR 2
++
++/* Command IDs */
++#define DPIO_CMDID_CLOSE 0x800
++#define DPIO_CMDID_OPEN 0x803
++#define DPIO_CMDID_CREATE 0x903
++#define DPIO_CMDID_DESTROY 0x900
++
++#define DPIO_CMDID_ENABLE 0x002
++#define DPIO_CMDID_DISABLE 0x003
++#define DPIO_CMDID_GET_ATTR 0x004
++#define DPIO_CMDID_RESET 0x005
++#define DPIO_CMDID_IS_ENABLED 0x006
++
++#define DPIO_CMDID_SET_IRQ 0x010
++#define DPIO_CMDID_GET_IRQ 0x011
++#define DPIO_CMDID_SET_IRQ_ENABLE 0x012
++#define DPIO_CMDID_GET_IRQ_ENABLE 0x013
++#define DPIO_CMDID_SET_IRQ_MASK 0x014
++#define DPIO_CMDID_GET_IRQ_MASK 0x015
++#define DPIO_CMDID_GET_IRQ_STATUS 0x016
++#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPIO_CMDID_SET_STASHING_DEST 0x120
++#define DPIO_CMDID_GET_STASHING_DEST 0x121
++#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122
++#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_OPEN(cmd, dpio_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \
++ cfg->channel_mode);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\
++} while (0)
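++
++/*
++ * Illustrative note (added for clarity): each MC_CMD_OP(cmd, param, offset,
++ * width, type, arg) invocation encodes 'arg' into bits
++ * [offset, offset + width) of 64-bit command parameter 'param'. For example,
++ * DPIO_CMD_CREATE above packs the 2-bit channel mode at bit 16 and the 8-bit
++ * priority count at bit 32 of parameter 0.
++ */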
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
++ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\
++ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++#endif /* _FSL_DPIO_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h
+@@ -0,0 +1,123 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_BASE_H
++#define _FSL_QBMAN_BASE_H
++
++/**
++ * struct qbman_block_desc - qbman block descriptor structure
++ *
++ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
++ * control this QBMan instance, these values may simply be place-holders. The
++ * idea is simply that we are able to distinguish between them, eg. so that SWP
++ * descriptors can identify which QBMan instance they belong to.
++ */
++struct qbman_block_desc {
++ void *ccsr_reg_bar; /* CCSR register map */
++ int irq_rerr; /* Recoverable error interrupt line */
++ int irq_nrerr; /* Non-recoverable error interrupt line */
++};
++
++/**
++ * struct qbman_swp_desc - qbman software portal descriptor structure
++ *
++ * Descriptor for a QBMan software portal, expressed in terms that make sense to
++ * the user context. Ie. on MC, this information is likely to be true-physical,
++ * and instantiated statically at compile-time. On GPP, this information is
++ * likely to be obtained via "discovery" over a partition's "layerscape bus"
++ * (ie. in response to a MC portal command), and would take into account any
++ * virtualisation of the GPP user's address space and/or interrupt numbering.
++ */
++struct qbman_swp_desc {
++ const struct qbman_block_desc *block; /* The QBMan instance */
++ void *cena_bar; /* Cache-enabled portal register map */
++ void *cinh_bar; /* Cache-inhibited portal register map */
++ uint32_t qman_version;
++};
++
++/* Driver object for managing a QBMan portal */
++struct qbman_swp;
++
++/**
++ * struct qbman_fd - basic structure for the qbman frame descriptor
++ *
++ * Place-holder for FDs, we represent it via the simplest form that we need for
++ * now. Different overlays may be needed to support different options, etc. (It
++ * is impractical to define One True Struct, because the resulting encoding
++ * routines (lots of read-modify-writes) would be worst-case performance whether
++ * or not circumstances required them.)
++ *
++ * Note, as with all data-structures exchanged between software and hardware (be
++ * they located in the portal register map or DMA'd to and from main-memory),
++ * the driver ensures that the caller of the driver API sees the data-structures
++ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
++ * contained within this structure are represented in host-endianness, even if
++ * hardware always treats them as little-endian. As such, if any of these fields
++ * are interpreted in a binary (rather than numerical) fashion by hardware
++ * blocks (eg. accelerators), then the user should be careful. We illustrate
++ * with an example;
++ *
++ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
++ * field of the FDs that are sent to it. Suppose also that the behaviour desired
++ * by the user corresponds to an "frc" value which is expressed as the literal
++ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
++ * value in which 0xfe is the first byte and 0xba is the last byte, and as
++ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
++ * the software is little-endian also, this can simply be achieved by setting
++ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
++ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is
++ * to treat the 32-bit words as numerical values, in which the offset of a field
++ * from the beginning of the first byte (as required or generated by hardware)
++ * is numerically encoded by a left-shift (ie. by raising the field to a
++ * corresponding power of 2). Ie. in the current example, software could set
++ * "frc" in the following way, and it would work correctly on both little-endian
++ * and big-endian operation;
++ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
++ */
++struct qbman_fd {
++ union {
++ uint32_t words[8];
++ struct qbman_fd_simple {
++ uint32_t addr_lo;
++ uint32_t addr_hi;
++ uint32_t len;
++ /* offset in the MS 16 bits, BPID in the LS 16 bits */
++ uint32_t bpid_offset;
++ uint32_t frc; /* frame context */
++ /* "err", "va", "cbmt", "asal", [...] */
++ uint32_t ctrl;
++ /* flow context */
++ uint32_t flc_lo;
++ uint32_t flc_hi;
++ } simple;
++ };
++};
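++
++/*
++ * Illustrative sketch (not part of the API above): populating a simple FD
++ * for a single-buffer frame, with hypothetical address, length and BPID
++ * values. The 'simple' overlay keeps each field numerical, so the encoding
++ * is endian-safe in the sense described above:
++ *
++ *	struct qbman_fd fd;
++ *
++ *	memset(&fd, 0, sizeof(fd));
++ *	fd.simple.addr_lo = (uint32_t)buf;		(hypothetical DMA address)
++ *	fd.simple.addr_hi = (uint32_t)(buf >> 32);
++ *	fd.simple.len = 64;				(frame length in bytes)
++ *	fd.simple.bpid_offset = (0 << 16) | 7;		(offset 0, BPID 7)
++ */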
++
++#endif /* !_FSL_QBMAN_BASE_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h
+@@ -0,0 +1,753 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_PORTAL_H
++#define _FSL_QBMAN_PORTAL_H
++
++#include "fsl_qbman_base.h"
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal object for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++/**
++ * qbman_swp_finish() - Destroy the functional object representing the given
++ * QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed.
++ *
++ */
++void qbman_swp_finish(struct qbman_swp *p);
++
++/**
++ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
++ * @p: the given portal object.
++ *
++ * Return the descriptor for this portal.
++ */
++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
++
++ /**************/
++ /* Interrupts */
++ /**************/
++
++/* See the QBMan driver API documentation for details on the interrupt
++ * mechanisms. */
++#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
++#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
++#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
++#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
++#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
++#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
++
++/**
++ * qbman_swp_interrupt_get_vanish()
++ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal
++ * interrupt status disable register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_ISDR register.
++ *
++ * Return the settings in SWP_ISDR register for Get function.
++ */
++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_read_status()
++ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal
++ * interrupt status register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_ISR register.
++ *
++ * Return the settings in SWP_ISR register for Get function.
++ *
++ */
++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_trigger()
++ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal
++ * interrupt enable register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_IER register.
++ *
++ * Return the settings in SWP_IER register for Get function.
++ */
++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_inhibit()
++ * qbman_swp_interrupt_set_inhibit() - Get/Set the data in software portal
++ * interrupt inhibit register.
++ * @p: the given software portal object.
++ * @inhibit: the value to set in the SWP_IIR register.
++ *
++ * Return the settings in SWP_IIR register for Get function.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++
++ /************/
++ /* Dequeues */
++ /************/
++
++/* See the QBMan driver API documentation for details on the dequeue
++ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is
++ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
++ * driver. The structure is defined in the DPIO interface, but to avoid circular
++ * dependencies we just pre/re-declare it here opaquely. */
++struct dpaa2_dq;
++
++/* ------------------- */
++/* Push-mode dequeuing */
++/* ------------------- */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup.
++ * @p: the software portal object.
++ * @channel_idx: the channel index to query.
++ * @enabled: returned boolean to show whether the push dequeue is enabled for
++ * the given channel.
++ */
++void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled);
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue.
++ * @p: the software portal object.
++ * @channel_idx: the channel index.
++ * @enable: enable or disable push dequeue.
++ *
++ * The user of a portal can enable and disable push-mode dequeuing of up to 16
++ * channels independently. It does not specify this toggling by channel IDs, but
++ * rather by specifying the index (from 0 to 15) that has been mapped to the
++ * desired channel.
++ */
++void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable);
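++
++/*
++ * Illustrative sketch (not part of the API above): enabling push dequeue on
++ * channel index 0, typically the index returned earlier by
++ * dpio_add_static_dequeue_channel() in the DPIO MC interface:
++ *
++ *	int enabled;
++ *
++ *	qbman_swp_push_set(swp, 0, 1);
++ *	qbman_swp_push_get(swp, 0, &enabled);		(enabled is now 1)
++ */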
++
++/* ------------------- */
++/* Pull-mode dequeuing */
++/* ------------------- */
++
++/**
++ * struct qbman_pull_desc - the structure for pull dequeue descriptor
++ */
++struct qbman_pull_desc {
++ uint32_t dont_manipulate_directly[6];
++};
++
++enum qbman_pull_type_e {
++ /* dequeue with priority precedence, respect intra-class scheduling */
++ qbman_pull_type_prio = 1,
++ /* dequeue with active FQ precedence, respect ICS */
++ qbman_pull_type_active,
++ /* dequeue with active FQ precedence, no ICS */
++ qbman_pull_type_active_noics
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ * @d: the pull dequeue descriptor to be cleared.
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++
++/**
++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set.
++ * @storage: the pointer of the memory to store the dequeue result.
++ * @storage_phys: the physical address of the storage memory.
++ * @stash: to indicate whether write allocate is enabled.
++ *
++ * If not called, or if called with 'storage' as NULL, the resulting pull
++ * dequeues will produce results to DQRR. If 'storage' is non-NULL, results are
++ * produced to the given memory location (using the physical/DMA address which
++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
++ * those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash);
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
++ * @d: the pull dequeue descriptor to be set.
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
++
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
++ * @fqid: the frame queue index of the given FQ.
++ *
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
++ * @wqid: composed of channel id and wqid within the channel.
++ * @dct: the dequeue command type.
++ *
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues.
++ * @chid: the channel id to be dequeued.
++ * @dct: the dequeue command type.
++ *
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid,
++ enum qbman_pull_type_e dct);
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid,
++ enum qbman_pull_type_e dct);
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object.
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls.
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d);
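++
++/*
++ * Illustrative sketch (not part of the API above): pull-dequeuing up to 4
++ * frames from a hypothetical FQID, with results delivered to DQRR because no
++ * storage is set:
++ *
++ *	struct qbman_pull_desc pd;
++ *
++ *	qbman_pull_desc_clear(&pd);
++ *	qbman_pull_desc_set_numframes(&pd, 4);
++ *	qbman_pull_desc_set_fq(&pd, 0x100);		(hypothetical FQID)
++ *	if (qbman_swp_pull(swp, &pd) == -EBUSY)
++ *		;					(portal not ready, retry)
++ */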
++
++/* -------------------------------- */
++/* Polling DQRR for dequeue results */
++/* -------------------------------- */
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry.
++ * @s: the software portal object.
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object.
++ * @dq: the DQRR entry to be consumed.
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
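++
++/*
++ * Illustrative sketch (not part of the API above): a typical DQRR polling
++ * loop, consuming each entry exactly once after processing it:
++ *
++ *	const struct dpaa2_dq *dq;
++ *
++ *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
++ *		if (qbman_result_is_DQ(dq))
++ *			;				(process via dpaa2_dq_*())
++ *		qbman_swp_dqrr_consume(swp, dq);
++ *	}
++ */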
++
++/* ------------------------------------------------- */
++/* Polling user-provided storage for dequeue results */
++/* ------------------------------------------------- */
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in pull dequeue command
++ * @s: the software portal object.
++ * @dq: the dequeue result read from the memory.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format (whether or not that is the same as the little-endian format that
++ * hardware DMA'd to the user's storage). As such, once the user has called
++ * qbman_result_has_new_result() and been returned a valid dequeue result,
++ * they should not call it again on the same memory location (except of course
++ * if another dequeue command has been executed to produce a new result to that
++ * location).
++ *
++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
++ * dequeue result.
++ */
++int qbman_result_has_new_result(struct qbman_swp *,
++ const struct dpaa2_dq *);
++
++/* -------------------------------------------------------- */
++/* Parsing dequeue entries (DQRR and user-provided storage) */
++/* -------------------------------------------------------- */
++
++/**
++ * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue response
++ * @dq: the dequeue result to be checked.
++ *
++ * DQRR entries may contain non-dequeue results, ie. notifications
++ */
++int qbman_result_is_DQ(const struct dpaa2_dq *);
++
++/**
++ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
++ * @dq: the dequeue result to be checked.
++ *
++ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
++ * notifications" of one type or another. Some APIs apply to all of them, of the
++ * form qbman_result_SCN_***().
++ */
++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
++{
++ return !qbman_result_is_DQ(dq);
++}
++
++/**
++ * Recognise different notification types, only required if the user allows for
++ * these to occur, and cares about them when they do.
++ */
++int qbman_result_is_FQDAN(const struct dpaa2_dq *);
++ /* FQ Data Availability */
++int qbman_result_is_CDAN(const struct dpaa2_dq *);
++ /* Channel Data Availability */
++int qbman_result_is_CSCN(const struct dpaa2_dq *);
++ /* Congestion State Change */
++int qbman_result_is_BPSCN(const struct dpaa2_dq *);
++ /* Buffer Pool State Change */
++int qbman_result_is_CGCU(const struct dpaa2_dq *);
++ /* Congestion Group Count Update */
++/* Frame queue state change notifications; (FQDAN in theory counts too as it
++ * leaves a FQ parked, but it is primarily a data availability notification) */
++int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */
++int qbman_result_is_FQRNI(const struct dpaa2_dq *);
++ /* Retirement Immediate */
++int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */
++
++/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer
++ * dpaa2_dq_*() functions. */
++
++/* State-change notifications (FQDAN/CDAN/CSCN/...). */
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_state_in_mem() - Get the state field in State-change
++ * notification which is written to memory instead of DQRR.
++ */
++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change
++ * notification which is written to memory instead of DQRR.
++ */
++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *);
++
++/* Type-specific "resource IDs". Mainly for illustration purposes, though it
++ * also gives the appropriate type widths. */
++#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++
++/**
++ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
++ *
++ * Return the buffer pool id.
++ */
++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
++ * buffers in the pool from BPSCN.
++ *
++ * Return non-zero if the pool has free buffers.
++ */
++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
++ * buffer pool is depleted.
++ *
++ * Return the status of buffer pool depletion.
++ */
++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
++ * pool is surplus or not.
++ *
++ * Return the status of buffer pool surplus.
++ */
++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
++ *
++ * Return the BPSCN context.
++ */
++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *);
++
++/* Parsing CGCU */
++/**
++ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
++ *
++ * Return the CGCU resource id.
++ */
++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *);
++/**
++ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
++ *
++ * Return instantaneous count in the CGCU notification.
++ */
++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *);
++
++ /************/
++ /* Enqueues */
++ /************/
++/**
++ * struct qbman_eq_desc - structure of enqueue descriptor
++ */
++struct qbman_eq_desc {
++ uint32_t dont_manipulate_directly[8];
++};
++
++/**
++ * struct qbman_eq_response - structure of enqueue response
++ */
++struct qbman_eq_response {
++ uint32_t dont_manipulate_directly[16];
++};
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *);
++
++/* Exactly one of the following descriptor "actions" should be set. (Calling
++ * any one of these will replace the effect of any prior call to one of these.)
++ * - enqueue without order-restoration
++ * - enqueue with order-restoration
++ * - fill a hole in the order-restoration sequence, without any enqueue
++ * - advance NESN (Next Expected Sequence Number), without any enqueue
++ * 'respond_success' indicates whether an enqueue response should be DMA'd
++ * after success (otherwise a response is DMA'd only after failure).
++ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
++ * be enqueued.
++ */
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without order restoration
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++
++/**
++ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ * @incomplete: indicates whether more fragments of the same 'seqnum' are yet
++ * to be enqueued.
++ */
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ uint32_t opr_id, uint32_t seqnum, int incomplete);
++
++/**
++ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum);
++
++/**
++ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum);
++
++/**
++ * qbman_eq_desc_set_response() - Set the enqueue response info.
++ * @d: the enqueue descriptor
++ * @storage_phys: the physical address of the enqueue response in memory.
++ * @stash: indicates whether write allocation is enabled.
++ *
++ * In the case where an enqueue response is DMA'd, this determines where that
++ * response should go. (The physical/DMA address is given for hardware's
++ * benefit, but software should interpret it as a "struct qbman_eq_response"
++ * data structure.) 'stash' controls whether or not the write to main-memory
++ * expresses a cache-warming attribute.
++ */
++void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
++ dma_addr_t storage_phys,
++ int stash);
++/**
++ * qbman_eq_desc_set_token() - Set token for the enqueue command
++ * @d: the enqueue descriptor
++ * @token: the token to be set.
++ *
++ * token is the value that shows up in an enqueue response that can be used to
++ * detect when the results have been published. The easiest technique is to zero
++ * result "storage" before issuing an enqueue, and use any non-zero 'token'
++ * value.
++ */
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
++
++/**
++ * qbman_eq_desc_set_fq()
++ * qbman_eq_desc_set_qd() - Set either FQ or Queuing Destination for the enqueue
++ * command.
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued.
++ * @qdid: the id of the queuing destination to be enqueued.
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority.
++ *
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - enqueue to a frame queue
++ * - enqueue to a queuing destination
++ * Note that none of these will have any effect if the "action" type has been
++ * set to "orp_hole" or "orp_nesn".
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
++ uint32_t qd_bin, uint32_t qd_prio);
++
++/**
++ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
++ * @d: the enqueue descriptor
++ * @enable: boolean to enable/disable EQDI
++ *
++ * Determines whether or not the portal's EQDI interrupt source should be
++ * asserted after the enqueue command is completed.
++ */
++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable);
++
++/**
++ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
++ * @d: the enqueue descriptor.
++ * @enable: enabled/disable DCA mode.
++ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
++ * @park: determines whether to park the FQ or not.
++ *
++ * Determines whether or not a portal DQRR entry should be consumed once the
++ * enqueue command is completed. (And if so, and the DQRR entry corresponds
++ * to a held-active (order-preserving) FQ, whether the FQ should be parked
++ * instead of being rescheduled.)
++ */
++void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable,
++ uint32_t dqrr_idx, int park);
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command.
++ * @s: the software portal used for enqueue.
++ * @d: the enqueue descriptor.
++ * @fd: the frame descriptor to be enqueued.
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
++ const struct qbman_fd *fd);
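++
++/*
++ * Illustrative sketch (not part of the API above): a minimal enqueue of a
++ * populated 'struct qbman_fd fd' to a hypothetical FQID, without order
++ * restoration and with a response DMA'd only on failure:
++ *
++ *	struct qbman_eq_desc ed;
++ *
++ *	qbman_eq_desc_clear(&ed);
++ *	qbman_eq_desc_set_no_orp(&ed, 0);
++ *	qbman_eq_desc_set_fq(&ed, 0x100);		(hypothetical FQID)
++ *	while (qbman_swp_enqueue(swp, &ed, &fd) == -EBUSY)
++ *		;					(EQCR full, retry)
++ */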
++
++/**
++ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt.
++ *
++ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
++ * the 'thresh' value set here. Setting thresh==0 (the default) disables it.
++ */
++int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh);
++
++ /*******************/
++ /* Buffer releases */
++ /*******************/
++/**
++ * struct qbman_release_desc - The structure for buffer release descriptor
++ */
++struct qbman_release_desc {
++ uint32_t dont_manipulate_directly[1];
++};
++
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *);
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable);
++
++/**
++ * qbman_swp_release() - Issue a buffer release command.
++ * @s: the software portal object.
++ * @d: the release descriptor.
++ * @buffers: a pointer to the buffer address(es) to be released.
++ * @num_buffers: number of buffers to be released, must be less than 8.
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const uint64_t *buffers, unsigned int num_buffers);
++
++ /*******************/
++ /* Buffer acquires */
++ /*******************/
++
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command.
++ * @s: the software portal object.
++ * @bpid: the buffer pool index.
++ * @buffers: a pointer to storage for the acquired buffer address(es).
++ * @num_buffers: number of buffers to be acquired, must be less than 8.
++ *
++ * Return 0 for success, or negative error code if the acquire command
++ * fails.
++ */
++int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
++ unsigned int num_buffers);
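++
++/*
++ * Illustrative sketch (not part of the API above): releasing two buffers to
++ * a hypothetical buffer pool and then acquiring from the same pool:
++ *
++ *	struct qbman_release_desc rd;
++ *	uint64_t bufs[2] = { 0x80000000, 0x80001000 };	(hypothetical addresses)
++ *
++ *	qbman_release_desc_clear(&rd);
++ *	qbman_release_desc_set_bpid(&rd, 7);		(hypothetical BPID)
++ *	while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
++ *		;					(RCR full, retry)
++ *
++ *	if (qbman_swp_acquire(swp, 7, bufs, 2))
++ *		;					(pool may be empty)
++ */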
++
++ /*****************/
++ /* FQ management */
++ /*****************/
++
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue to be scheduled.
++ *
++ * There are a couple of different ways that a FQ can end up in the parked
++ * state; this schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
++
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue to be forced.
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
++
++/**
++ * qbman_swp_fq_xon()
++ * qbman_swp_fq_xoff() - XON/XOFF the frame queue.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue.
++ *
++ * These functions toggle the FQ's flow-control state between XON/XOFF. (The
++ * default is XON.) This setting doesn't affect enqueues to the FQ, just
++ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
++ * changed to XOFF after it had already become truly-scheduled to a channel, and
++ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
++ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
++ * return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
++
++ /**********************/
++ /* Channel management */
++ /**********************/
++
++/* If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then these functions will be necessary.
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be re-enabled before they'll generate another. (The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * reenable step.) Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware. */
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object.
++ * @channelid: the channel index.
++ * @ctx: the context to be set in CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid,
++ uint64_t ctx);
++
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid);
++
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid);
++
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ * @ctx: the context set in CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid,
++ uint64_t ctx);
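++
++/*
++ * Illustrative sketch (not part of the API above): arming a CDAN-enabled
++ * channel with a hypothetical context, then re-arming after each
++ * notification as described above:
++ *
++ *	qbman_swp_CDAN_set_context_enable(swp, channelid, 0x12345678);
++ *
++ * On receiving the CDAN: pull-dequeue the channel, then re-arm with
++ *
++ *	qbman_swp_CDAN_enable(swp, channelid);
++ */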
++
++#endif /* !_FSL_QBMAN_PORTAL_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+@@ -0,0 +1,846 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_portal.h"
++#include "qbman_debug.h"
++#include "fsl_qbman_portal.h"
++
++/* QBMan portal management command code */
++#define QBMAN_BP_QUERY 0x32
++#define QBMAN_FQ_QUERY 0x44
++#define QBMAN_FQ_QUERY_NP 0x45
++#define QBMAN_CGR_QUERY 0x51
++#define QBMAN_WRED_QUERY 0x54
++#define QBMAN_CGR_STAT_QUERY 0x55
++#define QBMAN_CGR_STAT_QUERY_CLR 0x56
++
++enum qbman_attr_usage_e {
++ qbman_attr_usage_fq,
++ qbman_attr_usage_bpool,
++ qbman_attr_usage_cgr,
++};
++
++struct int_qbman_attr {
++ uint32_t words[32];
++ enum qbman_attr_usage_e usage;
++};
++
++#define attr_type_set(a, e) \
++{ \
++ struct qbman_attr *__attr = a; \
++ enum qbman_attr_usage_e __usage = e; \
++ ((struct int_qbman_attr *)__attr)->usage = __usage; \
++}
++
++#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
++
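++/*
++ * Note (added for clarity): each QB_CODE(word, lsoffset, width) below names
++ * a field of 'width' bits starting at bit 'lsoffset' of 32-bit result word
++ * 'word'; qb_attr_code_decode() extracts that field from the copied
++ * response words.
++ */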
++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8);
++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8);
++
++void qbman_bp_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_bpool);
++}
++
++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
++ struct qbman_attr *a)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t *attr = ATTR32(a);
++
++ qbman_bp_attr_clear(a);
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_bp_bpid, p, bpid);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != QBMAN_BP_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
++ return -EIO;
++ }
++
++ /* For the query, word[0] of the result contains only the
++ * verb/rslt fields, so skip word[0].
++ */
++ word_copy(&attr[1], &p[1], 15);
++ return 0;
++}
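++
++/*
++ * Illustrative sketch (not part of the patch's call sites): querying a
++ * hypothetical buffer pool and reading its fill level via the accessors
++ * defined below:
++ *
++ *	struct qbman_attr a;
++ *
++ *	if (!qbman_bp_query(s, 7, &a))
++ *		pr_info("BP 7 holds %u buffers\n",
++ *			qbman_bp_info_num_free_bufs(&a));
++ */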
++
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
++ *va = !!qb_attr_code_decode(&code_bp_va, p);
++ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
++}
++
++static uint32_t qbman_bp_thresh_to_value(uint32_t val)
++{
++ return (val & 0xff) << ((val & 0xf00) >> 8);
++}
++
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
++ p));
++}
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
++ p));
++}
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
++ p));
++}
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
++ p));
++}
++
++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
++ p));
++}
++
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
++ p));
++}
++
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid)
++{
++ uint32_t *p = ATTR32(a);
++
++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
++}
++
++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl)
++{
++ uint32_t *p = ATTR32(a);
++
++ *icid = qb_attr_code_decode(&code_bp_icid, p);
++ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
++}
++
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
++ p) << 32) |
++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
++ p);
++}
++
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
++ << 32) |
++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
++ p);
++}
++
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
++}
++
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
++}
++
++int qbman_bp_info_is_depleted(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
++}
++
++int qbman_bp_info_is_surplus(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
++}
++
++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_fill, p);
++}
++
++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdptr, p);
++}
++
++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sdcnt, p);
++}
++
++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdcnt, p);
++}
++
++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sscnt, p);
++}
++
++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
++
++void qbman_fq_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_fq);
++}
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t *d = ATTR32(desc);
++
++ qbman_fq_attr_clear(desc);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != QBMAN_FQ_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the WE-mask.
++ * For the query, word[0] of the result contains only the verb/rslt
++ * fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[1], &p[1], 15);
++ return 0;
++}
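++
++/*
++ * A minimal usage sketch, assuming a portal 'swp' and a frame queue id
++ * 'fqid' already set up by the caller (illustrative only):
++ *
++ *	struct qbman_attr attr;
++ *	uint32_t destwq;
++ *
++ *	if (!qbman_fq_query(swp, fqid, &attr))
++ *		qbman_fq_attr_get_destwq(&attr, &destwq);
++ */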
++
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl)
++{
++ uint32_t *p = ATTR32(d);
++
++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
++}
++
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
++}
++
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq)
++{
++ uint32_t *p = ATTR32(d);
++
++ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
++}
++
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred)
++{
++ uint32_t *p = ATTR32(d);
++
++ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
++}
++
++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
++static uint32_t qbman_thresh_to_value(uint32_t val)
++{
++ uint32_t m, e;
++
++ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
++ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
++ return m << e;
++}
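++
++/*
++ * Taildrop thresholds use the same mantissa/exponent compression: the
++ * 13-bit field keeps the exponent in bits 4:0 and the mantissa in bits
++ * 12:5, so the value is mantissa << exponent. For example (illustrative),
++ * 0x0c4 decodes as exponent 4, mantissa 6, i.e. 6 << 4 = 96.
++ */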
++
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh)
++{
++ uint32_t *p = ATTR32(d);
++
++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
++ p));
++}
++
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len)
++{
++ uint32_t *p = ATTR32(d);
++
++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
++ qb_attr_code_decode(&code_fq_oa_len, p));
++}
++
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps)
++{
++ uint32_t *p = ATTR32(d);
++
++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
++}
++
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo)
++{
++ uint32_t *p = ATTR32(d);
++
++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
++}
++
++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl)
++{
++ uint32_t *p = ATTR32(d);
++
++ *icid = qb_attr_code_decode(&code_fq_icid, p);
++ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
++}
++
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
++}
++
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
++}
++
++/* Query FQ Non-Programmable Fields */
++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++
++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *state)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t *d = ATTR32(state);
++
++ qbman_fq_attr_clear(state);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != QBMAN_FQ_QUERY_NP);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ word_copy(&d[0], &p[0], 16);
++ return 0;
++}
++
++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_state, p);
++}
++
++int qbman_fq_state_force_eligible(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_fe, p);
++}
++
++int qbman_fq_state_xoff(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_x, p);
++}
++
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_r, p);
++}
++
++int qbman_fq_state_overflow_error(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_oe, p);
++}
++
++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
++}
++
++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
++}
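++
++/*
++ * State-query sketch, assuming 'swp' and 'fqid' from the caller
++ * (illustrative only):
++ *
++ *	struct qbman_attr st;
++ *
++ *	if (!qbman_fq_query_state(swp, fqid, &st))
++ *		pr_info("FQ 0x%x holds %u frames\n", fqid,
++ *			qbman_fq_state_frame_count(&st));
++ */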
++
++/* Query CGR */
++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
++
++void qbman_cgr_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_cgr);
++}
++
++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t *d[2];
++ int i;
++ uint32_t query_verb;
++
++ d[0] = ATTR32(attr);
++ d[1] = ATTR32_1(attr);
++
++ qbman_cgr_attr_clear(attr);
++
++ for (i = 0; i < 2; i++) {
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the
++ * verb/cgid. For the query, word[0] of the result contains
++ * only the verb/rslt fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[i][1], &p[1], 15);
++ }
++ return 0;
++}
++
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
++ p);
++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
++}
++
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
++ int *rej_cnt_mode, int *cscn_bdi)
++{
++ uint32_t *p = ATTR32(d);
++ *mode = qb_attr_code_decode(&code_cgr_mode, p);
++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
++}
++
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
++ p);
++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
++}
++
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ uint32_t *i_cnt_wr_bnd)
++{
++ uint32_t *p = ATTR32(d);
++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++}
++
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++{
++ uint32_t *p = ATTR32(d);
++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
++}
++
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres)
++{
++ uint32_t *p = ATTR32(d);
++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres, p));
++}
++
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ uint32_t *cs_thres_x)
++{
++ uint32_t *p = ATTR32(d);
++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres_x, p));
++}
++
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres)
++{
++ uint32_t *p = ATTR32(d);
++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_td_thres, p));
++}
++
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
++}
++
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
++}
++
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ uint32_t *cscn_vcgid)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
++}
++
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
++ int *pl)
++{
++ uint32_t *p = ATTR32(d);
++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
++}
++
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ uint64_t *cg_wr_addr)
++{
++ uint32_t *p = ATTR32(d);
++ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
++ p) << 32) |
++ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
++ p);
++}
++
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
++ << 32) |
++ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
++}
++
++#define WRED_EDP_WORD(n) (18 + (n) / 4)
++#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
++#define WRED_PARM_DP_WORD(n) ((n) + 20)
++#define WRED_WE_EDP(n) (16 + (n) * 2)
++#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
++ int *edp)
++{
++ uint32_t *p = ATTR32(d);
++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
++ WRED_EDP_OFFSET(idx), 8);
++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
++}
++
++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
++ uint64_t *maxth, uint8_t *maxp)
++{
++ uint8_t ma, mn, step_i, step_s, pn;
++
++ ma = (uint8_t)(dp >> 24);
++ mn = (uint8_t)(dp >> 19) & 0x1f;
++ step_i = (uint8_t)(dp >> 11);
++ step_s = (uint8_t)(dp >> 6) & 0x1f;
++ pn = (uint8_t)dp & 0x3f;
++
++ *maxp = ((pn<<2) * 100)/256;
++
++ if (mn == 0)
++ *maxth = ma;
++ else
++ *maxth = ((ma+256) * (1<<(mn-1)));
++
++ if (step_s == 0)
++ *minth = *maxth - step_i;
++ else
++ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1));
++}
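++
++/*
++ * Worked example (values chosen for illustration): dp = 0x280120 encodes
++ * ma = 0, mn = 5, step_i = 0, step_s = 4, pn = 32, which decomposes to
++ * maxth = 256 << 4 = 4096, minth = 4096 - (256 << 3) = 2048 and
++ * maxp = (32 << 2) * 100 / 256 = 50 (percent).
++ */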
++
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
++ uint32_t *dp)
++{
++ uint32_t *p = ATTR32(d);
++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
++ 0, 8);
++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
++}
++
++/* Query CGR/CCGR/CQ statistics */
++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
++static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid,
++ int clear, uint32_t command_type,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t query_verb;
++ uint32_t hi, lo;
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ if (command_type < 2)
++ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
++ query_verb = clear ?
++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query statistics of CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++
++ if (frame_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
++ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
++ }
++ if (byte_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
++ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
++ }
++
++ return 0;
++}
++
++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 1,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0,
++ frame_cnt, byte_cnt);
++}
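++
++/*
++ * Statistics usage sketch, assuming 'swp' and 'cgid' from the caller
++ * (illustrative only; pass NULL for a counter you don't need):
++ *
++ *	uint64_t frames, bytes;
++ *
++ *	if (!qbman_cgr_reject_statistics(swp, cgid, 0, &frames, &bytes))
++ *		pr_info("CGR 0x%x rejected %llu frames\n", cgid,
++ *			(unsigned long long)frames);
++ */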
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+@@ -0,0 +1,136 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++struct qbman_attr {
++ uint32_t dont_manipulate_directly[40];
++};
++
++/* Buffer pool query commands */
++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
++ struct qbman_attr *a);
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet);
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt);
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet);
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt);
++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset);
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt);
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid);
++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl);
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr);
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx);
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ);
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
++int qbman_bp_info_is_depleted(struct qbman_attr *a);
++int qbman_bp_info_is_surplus(struct qbman_attr *a);
++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a);
++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a);
++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a);
++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a);
++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a);
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *desc);
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl);
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid);
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq);
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred);
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh);
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len);
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps);
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo);
++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl);
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid);
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid);
++
++/* FQ query command for non-programmable fields */
++enum qbman_fq_schedstate_e {
++ qbman_fq_schedstate_oos = 0,
++ qbman_fq_schedstate_retired,
++ qbman_fq_schedstate_tentatively_scheduled,
++ qbman_fq_schedstate_truly_scheduled,
++ qbman_fq_schedstate_parked,
++ qbman_fq_schedstate_held_active,
++};
++
++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *state);
++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state);
++int qbman_fq_state_force_eligible(const struct qbman_attr *state);
++int qbman_fq_state_xoff(const struct qbman_attr *state);
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
++int qbman_fq_state_overflow_error(const struct qbman_attr *state);
++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state);
++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state);
++
++/* CGR query */
++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
++ struct qbman_attr *attr);
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd);
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
++ int *rej_cnt_mode, int *cscn_bdi);
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va);
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ uint32_t *i_cnt_wr_bnd);
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres);
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ uint32_t *cs_thres_x);
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres);
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp);
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid);
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ uint32_t *cscn_vcgid);
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
++ int *pl);
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ uint64_t *cg_wr_addr);
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx);
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
++ int *edp);
++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
++ uint64_t *maxth, uint8_t *maxp);
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
++ uint32_t *dp);
++
++/* CGR/CCGR/CQ statistics query */
++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c
+@@ -0,0 +1,1212 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_portal.h"
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE 0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_DQPI 0xa00
++#define QBMAN_CINH_SWP_DCAP 0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR 0xcc0
++#define QBMAN_CINH_SWP_ISR 0xe00
++#define QBMAN_CINH_SWP_IER 0xe40
++#define QBMAN_CINH_SWP_ISDR 0xe80
++#define QBMAN_CINH_SWP_IIR 0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_CR 0x600
++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR 0x780
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
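++/* Each DQRR entry occupies its own 64-byte cacheline at 0x200 + 64*n, so
++ * masking an entry's address with 0x1ff and dividing out the 64-byte
++ * stride recovers its ring index.
++ */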
++
++/* QBMan FQ management command codes */
++#define QBMAN_FQ_SCHEDULE 0x48
++#define QBMAN_FQ_FORCE 0x49
++#define QBMAN_FQ_XON 0x4d
++#define QBMAN_FQ_XOFF 0x4e
++
++/*******************************/
++/* Pre-defined attribute codes */
++/*******************************/
++
++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++
++/*************************/
++/* SDQCR attribute codes */
++/*************************/
++
++/* we put these here because at least some of them are required by
++ * qbman_swp_init() */
++struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
++struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
++struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
++#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
++enum qbman_sdqcr_dct {
++ qbman_sdqcr_dct_null = 0,
++ qbman_sdqcr_dct_prio_ics,
++ qbman_sdqcr_dct_active_ics,
++ qbman_sdqcr_dct_active
++};
++enum qbman_sdqcr_fc {
++ qbman_sdqcr_fc_one = 0,
++ qbman_sdqcr_fc_up_to_3 = 1
++};
++struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
++
++/*********************************/
++/* Portal constructor/destructor */
++/*********************************/
++
++/* Software portals should always be in the power-on state when we initialise,
++ * due to the CCSR-based portal reset functionality that MC has.
++ *
++ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
++ * valid-bits, so we need to support a workaround where we don't trust
++ * valid-bits when detecting new entries until any stale ring entries have been
++ * overwritten at least once. The idea is that we read PI for the first few
++ * entries, then switch to valid-bit after that. The trick is to clear the
++ * bug-work-around boolean once the PI wraps around the ring for the first time.
++ *
++ * Note: this still carries a slight additional cost once the decrementer hits
++ * zero, so ideally the workaround should only be compiled in if the compiled
++ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for
++ * this.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++ int ret;
++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++
++ if (!p)
++ return NULL;
++ p->desc = d;
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_start;
++#endif
++ p->mc.valid_bit = QB_VALID_BIT;
++ p->sdq = 0;
++ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
++ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
++ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
++ atomic_set(&p->vdq.busy, 1);
++ p->vdq.valid_bit = QB_VALID_BIT;
++ p->dqrr.next_idx = 0;
++ p->dqrr.valid_bit = QB_VALID_BIT;
++ /* TODO: should also read PI/CI type registers and check that they're on
++ * PoR values. If we're asked to initialise portals that aren't in reset
++ * state, bad things will follow. */
++#ifdef WORKAROUND_DQRR_RESET_BUG
++ p->dqrr.reset_bug = 1;
++#endif
++ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100)
++ p->dqrr.dqrr_size = 4;
++ else
++ p->dqrr.dqrr_size = 8;
++ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
++ if (ret) {
++ kfree(p);
++ pr_err("qbman_swp_sys_init() failed %d\n", ret);
++ return NULL;
++ }
++ /* SDQCR needs to be initialized to 0 when no channels are
++ * being dequeued from, or else the QMan HW will indicate an
++ * error. The values that were calculated above will be
++ * applied when dequeues from a specific channel are enabled.
++ */
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
++ return p;
++}
++
++void qbman_swp_finish(struct qbman_swp *p)
++{
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_start);
++#endif
++ qbman_swp_sys_finish(&p->sys);
++ kfree(p);
++}
++
++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
++{
++ return p->desc;
++}
++
++/**************/
++/* Interrupts */
++/**************/
++
++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
++}
++
++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
++}
++
++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
++}
++
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
++}
++
++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
++}
++
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
++}
++
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
++}
++
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
++/***********************/
++/* Management commands */
++/***********************/
++
++/*
++ * Internal code common to all types of management commands.
++ */
++
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++ void *ret;
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_start);
++#endif
++ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
++#ifdef QBMAN_CHECKING
++ if (!ret)
++ p->mc.check = swp_mc_can_submit;
++#endif
++ return ret;
++}
++
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
++{
++ uint32_t *v = cmd;
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_submit);
++#endif
++ /* TBD: "|=" is going to hurt performance. Need to move as many fields
++ * out of word zero, and for those that remain, the "OR" needs to occur
++ * at the caller side. This debug check helps to catch cases where the
++ * caller wants to OR but has forgotten to do so. */
++ BUG_ON((*v & cmd_verb) != *v);
++ *v = cmd_verb | p->mc.valid_bit;
++ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_poll;
++#endif
++}
++
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++ uint32_t *ret, verb;
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_poll);
++#endif
++ qbman_cena_invalidate_prefetch(&p->sys,
++ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ /* Remove the valid-bit - command completed iff the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_start;
++#endif
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ return ret;
++}
++
++/***********/
++/* Enqueue */
++/***********/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
++static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
++static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
++static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
++static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2);
++static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
++static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
++static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
++static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
++static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
++static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
++/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
++static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
++static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
++static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
++static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
++static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
++
++enum qbman_eq_cmd_e {
++ /* No enqueue, primarily for plugging ORP gaps for dropped frames */
++ qbman_eq_cmd_empty,
++ /* DMA an enqueue response once complete */
++ qbman_eq_cmd_respond,
++ /* DMA an enqueue response only if the enqueue fails */
++ qbman_eq_cmd_respond_reject
++};
++
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_orp_en, cl, 0);
++ qb_attr_code_encode(&code_eq_cmd, cl,
++ respond_success ? qbman_eq_cmd_respond :
++ qbman_eq_cmd_respond_reject);
++}
++
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ uint32_t opr_id, uint32_t seqnum, int incomplete)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl,
++ respond_success ? qbman_eq_cmd_respond :
++ qbman_eq_cmd_respond_reject);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
++}
++
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
++}
++
++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
++}
++
++void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
++ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
++}
++
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
++}
++
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_qd_en, cl, 0);
++ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
++}
++
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
++ uint32_t qd_bin, uint32_t qd_prio)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_qd_en, cl, 1);
++ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
++ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
++ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
++}
++
++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
++}
++
++void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
++ uint32_t dqrr_idx, int park)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
++ if (enable) {
++ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
++ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
++ }
++}
++
++#define EQAR_IDX(eqar) ((eqar) & 0x7)
++#define EQAR_VB(eqar) ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
++
++ pr_debug("EQAR=%08x\n", eqar);
++ if (!EQAR_SUCCESS(eqar))
++ return -EBUSY;
++ p = qbman_cena_write_start(&s->sys,
++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ word_copy(&p[1], &cl[1], 7);
++ word_copy(&p[8], fd, sizeof(*fd) >> 2);
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p[0] = cl[0] | EQAR_VB(eqar);
++ qbman_cena_write_complete(&s->sys,
++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)),
++ p);
++ return 0;
++}
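++
++/*
++ * Enqueue usage sketch, assuming 'swp', 'fqid' and a frame descriptor 'fd'
++ * prepared by the caller (illustrative only; -EBUSY just means no EQCR
++ * slot was available):
++ *
++ *	struct qbman_eq_desc ed;
++ *
++ *	qbman_eq_desc_clear(&ed);
++ *	qbman_eq_desc_set_no_orp(&ed, 0);
++ *	qbman_eq_desc_set_fq(&ed, fqid);
++ *	while (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
++ *		cpu_relax();
++ */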
++
++/*************************/
++/* Static (push) dequeue */
++/*************************/
++
++void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
++{
++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
++
++ BUG_ON(channel_idx > 15);
++ *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
++}
++
++void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
++{
++ uint16_t dqsrc;
++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
++
++ BUG_ON(channel_idx > 15);
++ qb_attr_code_encode(&code, &s->sdq, !!enable);
++ /* Read back the complete src map. If no channels are enabled,
++ * the SDQCR must be 0 or else QMan will assert errors.
++ */
++ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
++ if (dqsrc != 0)
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
++ else
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
++/***************************/
++/* Volatile (pull) dequeue */
++/***************************/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
++static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
++static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
++static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
++static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
++static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
++static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
++
++enum qb_pull_dt_e {
++ qb_pull_dt_channel,
++ qb_pull_dt_workqueue,
++ qb_pull_dt_framequeue
++};
++
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ uint32_t *cl = qb_cl(d);
++
++ /* Squiggle the pointer 'storage' into the extra 2 words of the
++ * descriptor (which aren't copied to the hw command) */
++ *(void **)&cl[4] = storage;
++ if (!storage) {
++ qb_attr_code_encode(&code_pull_rls, cl, 0);
++ return;
++ }
++ qb_attr_code_encode(&code_pull_rls, cl, 1);
++ qb_attr_code_encode(&code_pull_stash, cl, !!stash);
++ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
++}
++
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
++{
++ uint32_t *cl = qb_cl(d);
++
++ BUG_ON(!numframes || (numframes > 16));
++ qb_attr_code_encode(&code_pull_numframes, cl,
++ (uint32_t)(numframes - 1));
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_pull_token, cl, token);
++}
++
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_pull_dct, cl, 1);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
++ qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
++}
++
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
++ enum qbman_pull_type_e dct)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_pull_dct, cl, dct);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
++ qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
++}
++
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
++ enum qbman_pull_type_e dct)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_pull_dct, cl, dct);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
++ qb_attr_code_encode(&code_pull_dqsource, cl, chid);
++}
++
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++ uint32_t *p;
++ uint32_t *cl = qb_cl(d);
++
++ if (!atomic_dec_and_test(&s->vdq.busy)) {
++ atomic_inc(&s->vdq.busy);
++ return -EBUSY;
++ }
++ s->vdq.storage = *(void **)&cl[4];
++ qb_attr_code_encode(&code_pull_token, cl, 1);
++ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR);
++ word_copy(&p[1], &cl[1], 3);
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p[0] = cl[0] | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p);
++ return 0;
++}
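++
++/*
++ * Volatile dequeue sketch, assuming 'storage'/'phys' point at a DMA-able
++ * result area owned by the caller (illustrative only; with more than one
++ * frame requested the results land in consecutive storage entries):
++ *
++ *	struct qbman_pull_desc pd;
++ *
++ *	qbman_pull_desc_clear(&pd);
++ *	qbman_pull_desc_set_storage(&pd, storage, phys, 1);
++ *	qbman_pull_desc_set_numframes(&pd, 4);
++ *	qbman_pull_desc_set_fq(&pd, fqid);
++ *	if (!qbman_swp_pull(swp, &pd))
++ *		while (!qbman_result_has_new_result(swp, storage))
++ *			cpu_relax();
++ */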
++
++/****************/
++/* Polling DQRR */
++/****************/
++
++static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
++static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
++static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
++static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
++static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
++/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
++static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
++static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
++static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
++
++#define QBMAN_RESULT_DQ 0x60
++#define QBMAN_RESULT_FQRN 0x21
++#define QBMAN_RESULT_FQRNI 0x22
++#define QBMAN_RESULT_FQPN 0x24
++#define QBMAN_RESULT_FQDAN 0x25
++#define QBMAN_RESULT_CDAN 0x26
++#define QBMAN_RESULT_CSCN_MEM 0x27
++#define QBMAN_RESULT_CGCU 0x28
++#define QBMAN_RESULT_BPSCN 0x29
++#define QBMAN_RESULT_CSCN_WQ 0x2a
++
++static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
++
++/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order. */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
++{
++ uint32_t verb;
++ uint32_t response_verb;
++ uint32_t flags;
++ const struct dpaa2_dq *dq;
++ const uint32_t *p;
++
++ /* Before using valid-bit to detect if something is there, we have to
++ * handle the case of the DQRR reset bug... */
++#ifdef WORKAROUND_DQRR_RESET_BUG
++ if (unlikely(s->dqrr.reset_bug)) {
++ /* We pick up new entries by cache-inhibited producer index,
++ * which means that a non-coherent mapping would require us to
++ * invalidate and read *only* once that PI has indicated that
++ * there's an entry here. The first trip around the DQRR ring
++ * will be much less efficient than all subsequent trips around
++ * it...
++ */
++ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
++ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
++ /* there are new entries iff pi != next_idx */
++ if (pi == s->dqrr.next_idx)
++ return NULL;
++ /* if next_idx is/was the last ring index, and 'pi' is
++ * different, we can disable the workaround as all the ring
++ * entries have now been DMA'd to so valid-bit checking is
++ * repaired. Note: this logic needs to be based on next_idx
++ * (which increments one at a time), rather than on pi (which
++ * can burst and wrap-around between our snapshots of it).
++ */
++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
++ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
++ s->dqrr.next_idx, pi);
++ s->dqrr.reset_bug = 0;
++ }
++ qbman_cena_invalidate_prefetch(&s->sys,
++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ }
++#endif
++
++ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ p = qb_cl(dq);
++ verb = qb_attr_code_decode(&code_dqrr_verb, p);
++
++ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
++ * in the DQRR reset bug workaround, we shouldn't need to skip this
++ * check, because we've already determined that a new entry is available
++ * and we've invalidated the cacheline before reading it, so the
++ * valid-bit behaviour is repaired and should tell us what we already
++ * knew from reading PI.
++ */
++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
++ qbman_cena_invalidate_prefetch(&s->sys,
++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ return NULL;
++ }
++ /* There's something there. Move "next_idx" attention to the next ring
++ * entry (and prefetch it) before returning what we found. */
++ s->dqrr.next_idx++;
++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
++ /* TODO: it's possible to do all this without conditionals, optimise it
++ * later. */
++ if (!s->dqrr.next_idx)
++ s->dqrr.valid_bit ^= QB_VALID_BIT;
++
++ /* If this is the final response to a volatile dequeue command
++ indicate that the vdq is no longer busy */
++ flags = dpaa2_dq_flags(dq);
++ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
++ if ((response_verb == QBMAN_RESULT_DQ) &&
++ (flags & DPAA2_DQ_STAT_VOLATILE) &&
++ (flags & DPAA2_DQ_STAT_EXPIRED))
++ atomic_inc(&s->vdq.busy);
++
++ qbman_cena_invalidate_prefetch(&s->sys,
++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ return dq;
++}
++
++/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
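++
++/*
++ * Typical DQRR polling loop (illustrative only):
++ *
++ *	const struct dpaa2_dq *dq;
++ *
++ *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
++ *		if (qbman_result_is_DQ(dq))
++ *			process_fd(dpaa2_dq_fd(dq));
++ *		qbman_swp_dqrr_consume(swp, dq);
++ *	}
++ *
++ * where process_fd() stands in for the caller's frame handler.
++ */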
++
++/*********************************/
++/* Polling user-provided storage */
++/*********************************/
++
++int qbman_result_has_new_result(struct qbman_swp *s,
++ const struct dpaa2_dq *dq)
++{
++ /* To avoid converting the little-endian DQ entry to host-endian prior
++ * to us knowing whether there is a valid entry or not (and run the
++ * risk of corrupting the incoming hardware LE write), we detect in
++ * hardware endianness rather than host. This means we need a different
++ * "code" depending on whether we are BE or LE in software, which is
++ * where DQRR_TOK_OFFSET comes in... */
++ static struct qb_attr_code code_dqrr_tok_detect =
++ QB_CODE(0, DQRR_TOK_OFFSET, 8);
++ /* The user trying to poll for a result treats "dq" as const. It is
++ * however the same address that was provided to us non-const in the
++ * first place, for directing hardware DMA to. So we can cast away the
++ * const because it is mutable from our perspective. */
++ uint32_t *p = qb_cl((struct dpaa2_dq *)dq);
++ uint32_t token;
++
++ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
++ if (token != 1)
++ return 0;
++ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
++
++ /* Only now do we convert from hardware to host endianness. Also, as we
++ * are returning success, the user has promised not to call us again, so
++ * there's no risk of us converting the endianness twice... */
++ make_le32_n(p, 16);
++
++ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the
++ * fact "VDQCR" shows busy doesn't mean that the result we're looking at
++ * is from the same command. Eg. we may be looking at our 10th dequeue
++ * result from our first VDQCR command, yet the second dequeue command
++ * could have been kicked off already, after seeing the 1st result. Ie.
++ * the result we're looking at is not necessarily proof that we can
++ * reset "busy". We instead base the decision on whether the current
++ * result is sitting at the first 'storage' location of the busy
++ * command. */
++ if (s->vdq.storage == dq) {
++ s->vdq.storage = NULL;
++ atomic_inc(&s->vdq.busy);
++ }
++ return 1;
++}
++
++/********************************/
++/* Categorising qbman_result */
++/********************************/
++
++static struct qb_attr_code code_result_in_mem =
++ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
++
++static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x)
++{
++ const uint32_t *p = qb_cl(dq);
++ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
++
++ return response_verb == x;
++}
++
++static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq,
++ uint32_t x)
++{
++ const uint32_t *p = qb_cl(dq);
++ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
++
++ return (response_verb == x);
++}
++
++int qbman_result_is_DQ(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
++}
++
++int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
++}
++
++int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
++}
++
++int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
++ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
++}
++
++int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
++}
++
++int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
++}
++
++int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
++}
++
++int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
++}
++
++int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
++}
++
++/*********************************/
++/* Parsing frame dequeue results */
++/*********************************/
++
++/* These APIs assume qbman_result_is_DQ() is TRUE */
++
++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return qb_attr_code_decode(&code_dqrr_stat, p);
++}
++
++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
++}
++
++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
++}
++
++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return qb_attr_code_decode(&code_dqrr_fqid, p);
++}
++
++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return qb_attr_code_decode(&code_dqrr_byte_count, p);
++}
++
++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return qb_attr_code_decode(&code_dqrr_frame_count, p);
++}
++
++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++{
++ const uint64_t *p = (uint64_t *)qb_cl(dq);
++
++ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
++}
++EXPORT_SYMBOL(dpaa2_dq_fqd_ctx);
++
++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++
++ return (const struct dpaa2_fd *)&p[8];
++}
++EXPORT_SYMBOL(dpaa2_dq_fd);
++
++/**************************************/
++/* Parsing state-change notifications */
++/**************************************/
++
++static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
++static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_scn_state_in_mem =
++ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
++static struct qb_attr_code code_scn_rid_in_mem =
++ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
++static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
++
++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++
++ return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
++}
++
++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++
++ return qb_attr_code_decode(&code_scn_rid, p);
++}
++
++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
++{
++ const uint64_t *p = (uint64_t *)qb_cl(scn);
++
++ return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
++}
++
++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++
++ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
++}
++
++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++ uint32_t result_rid;
++
++ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
++ return make_le24(result_rid);
++}
++
++/*****************/
++/* Parsing BPSCN */
++/*****************/
++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn)
++{
++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
++}
++
++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn)
++{
++ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
++}
++
++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn)
++{
++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
++}
++
++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn)
++{
++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
++}
++
++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn)
++{
++ return qbman_result_SCN_ctx(scn);
++}
++
++/*****************/
++/* Parsing CGCU */
++/*****************/
++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn)
++{
++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
++}
++
++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn)
++{
++ return qbman_result_SCN_ctx(scn) & 0xFFFFFFFFFF;
++}
++
++/******************/
++/* Buffer release */
++/******************/
++
++/* These should be const, eventually */
++/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
++static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
++static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
++static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
++
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++ uint32_t *cl;
++
++ memset(d, 0, sizeof(*d));
++ cl = qb_cl(d);
++ qb_attr_code_encode(&code_release_set_me, cl, 1);
++}
++
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_release_bpid, cl, bpid);
++}
++
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++ uint32_t *cl = qb_cl(d);
++
++ qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
++}
++
++#define RAR_IDX(rar) ((rar) & 0x7)
++#define RAR_VB(rar) ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const uint64_t *buffers, unsigned int num_buffers)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
++
++ pr_debug("RAR=%08x\n", rar);
++ if (!RAR_SUCCESS(rar))
++ return -EBUSY;
++ BUG_ON(!num_buffers || (num_buffers > 7));
++ /* Start the release command */
++ p = qbman_cena_write_start(&s->sys,
++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ /* Copy the caller's buffer pointers to the command */
++ u64_to_le32_copy(&p[2], buffers, num_buffers);
++ /* Set the verb byte; we have to substitute in the valid-bit and the
++ * number of buffers. */
++ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
++ qbman_cena_write_complete(&s->sys,
++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)),
++ p);
++ return 0;
++}
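++
++/* Hypothetical usage sketch: releasing two buffer addresses to BPID 7
++ * (buf0/buf1 are illustrative). qbman_swp_release() returns -EBUSY when the
++ * release ring has no usable slot yet, so a caller may simply retry:
++ *
++ *	struct qbman_release_desc rd;
++ *	uint64_t bufs[2] = { buf0, buf1 };
++ *
++ *	qbman_release_desc_clear(&rd);
++ *	qbman_release_desc_set_bpid(&rd, 7);
++ *	while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
++ *		;
++ */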
++
++/*******************/
++/* Buffer acquires */
++/*******************/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
++static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
++
++int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
++ unsigned int num_buffers)
++{
++ uint32_t *p;
++ uint32_t verb, rslt, num;
++
++ BUG_ON(!num_buffers || (num_buffers > 7));
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_acquire_bpid, p, bpid);
++ qb_attr_code_encode(&code_acquire_num, p, num_buffers);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ num = qb_attr_code_decode(&code_acquire_r_num, p);
++ BUG_ON(verb != QBMAN_MC_ACQUIRE);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
++ bpid, rslt);
++ return -EIO;
++ }
++ BUG_ON(num > num_buffers);
++ /* Copy the acquired buffers to the caller's array */
++ u64_from_le32_copy(buffers, &p[2], num);
++ return (int)num;
++}
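++
++/* Hypothetical usage sketch: the return value of qbman_swp_acquire() is the
++ * number of buffers actually acquired, which can be fewer than requested if
++ * the pool is running low, or negative on error (swp/bpid illustrative):
++ *
++ *	uint64_t bufs[7];
++ *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
++ *
++ *	if (n < 0)
++ *		... command failed ...
++ *	else
++ *		... n buffer addresses are now in bufs[] ...
++ */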
++
++/*****************/
++/* FQ management */
++/*****************/
++
++static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);
++
++static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
++ uint8_t alt_fq_verb)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != alt_fq_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
++ fqid, alt_fq_verb, rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
++
++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
++
++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++}
++
++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
++}
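++
++/* Hypothetical usage sketch: the four wrappers above issue the same "alter
++ * FQ state" management command with different verbs. For example, dequeues
++ * from an FQ can be paused and later resumed around maintenance work:
++ *
++ *	if (qbman_swp_fq_xoff(swp, fqid))
++ *		... command failed ...
++ *	... maintenance on the FQ ...
++ *	if (qbman_swp_fq_xon(swp, fqid))
++ *		... command failed ...
++ */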
++
++/**********************/
++/* Channel management */
++/**********************/
++
++static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
++static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
++static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
++static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);
++
++/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
++ * would be irresponsible to expose it. */
++#define CODE_CDAN_WE_EN 0x1
++#define CODE_CDAN_WE_CTX 0x4
++
++static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
++ uint8_t we_mask, uint8_t cdan_en,
++ uint64_t ctx)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_cdan_cid, p, channelid);
++ qb_attr_code_encode(&code_cdan_we, p, we_mask);
++ qb_attr_code_encode(&code_cdan_en, p, cdan_en);
++ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
++ channelid, rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_CTX,
++ 0, ctx);
++}
++
++int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 1, 0);
++}
++
++int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 0, 0);
++}
++
++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++ 1, ctx);
++}
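++
++/* Hypothetical usage sketch: a caller wanting channel dequeue availability
++ * notifications would typically program the context and enable the CDAN in
++ * one shot (ch_id and cookie are illustrative):
++ *
++ *	if (qbman_swp_CDAN_set_context_enable(swp, ch_id, (uint64_t)cookie))
++ *		... command failed ...
++ */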
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h
+@@ -0,0 +1,261 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_private.h"
++#include "fsl_qbman_portal.h"
++#include "../../include/fsl_dpaa2_fd.h"
++
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((uint32_t)0x80)
++
++/* Management command result codes */
++#define QBMAN_MC_RSLT_OK 0xf0
++
++/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */
++#define QBMAN_DQRR_SIZE 4
++
++/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */
++#define WORKAROUND_DQRR_RESET_BUG
++
++/* --------------------- */
++/* portal data structure */
++/* --------------------- */
++
++struct qbman_swp {
++ const struct qbman_swp_desc *desc;
++ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
++ * needs in here. */
++ struct qbman_swp_sys sys;
++ /* Management commands */
++ struct {
++#ifdef QBMAN_CHECKING
++ enum swp_mc_check {
++ swp_mc_can_start, /* call __qbman_swp_mc_start() */
++ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
++ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
++ } check;
++#endif
++ uint32_t valid_bit; /* 0x00 or 0x80 */
++ } mc;
++ /* Push dequeues */
++ uint32_t sdq;
++ /* Volatile dequeues */
++ struct {
++ /* VDQCR supports a "1 deep pipeline", meaning that if you know
++ * the last-submitted command is already executing in the
++ * hardware (as evidenced by at least 1 valid dequeue result),
++ * you can write another dequeue command to the register; the
++ * hardware will start executing it as soon as the
++ * already-executing command terminates. (This minimises latency
++ * and stalls.) With that in mind, this "busy" variable refers
++ * to whether or not a command can be submitted, not whether or
++ * not a previously-submitted command is still executing. In
++ * other words, once proof is seen that the previously-submitted
++ * command is executing, "vdq" is no longer "busy".
++ */
++ atomic_t busy;
++ uint32_t valid_bit; /* 0x00 or 0x80 */
++ /* We need to determine when vdq is no longer busy. This depends
++ * on whether the "busy" (last-submitted) dequeue command is
++ * targeting DQRR or main-memory, and detection is based on the
++ * presence of the dequeue command's "token" showing up in
++ * dequeue entries in DQRR or main-memory (respectively). */
++ struct dpaa2_dq *storage; /* NULL if DQRR */
++ } vdq;
++ /* DQRR */
++ struct {
++ uint32_t next_idx;
++ uint32_t valid_bit;
++ uint8_t dqrr_size;
++#ifdef WORKAROUND_DQRR_RESET_BUG
++ int reset_bug;
++#endif
++ } dqrr;
++};
++
++/* -------------------------- */
++/* portal management commands */
++/* -------------------------- */
++
++/* Different management commands all use this common base layer of code to issue
++ * commands and poll for results. The first function returns a pointer to where
++ * the caller should fill in their MC command (though they should ignore the
++ * verb byte), the second function merges in the caller-supplied command
++ * verb (which should not include the valid-bit) and submits the command to
++ * hardware, and the third function checks for a completed response (returns
++ * non-NULL if and only if the response is complete). */
++void *qbman_swp_mc_start(struct qbman_swp *p);
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
++void *qbman_swp_mc_result(struct qbman_swp *p);
++
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++ uint32_t cmd_verb)
++{
++ int loopvar;
++
++ qbman_swp_mc_submit(swp, cmd, cmd_verb);
++ DBG_POLL_START(loopvar);
++ do {
++ DBG_POLL_CHECK(loopvar);
++ cmd = qbman_swp_mc_result(swp);
++ } while (!cmd);
++ return cmd;
++}
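++
++/* Hypothetical usage sketch of the start/encode/complete protocol described
++ * above (code_example and MY_CMD_VERB are illustrative, not real symbols):
++ *
++ *	uint32_t *p = qbman_swp_mc_start(swp);
++ *
++ *	if (!p)
++ *		return -EBUSY;
++ *	qb_attr_code_encode(&code_example, p, some_value);
++ *	p = qbman_swp_mc_complete(swp, p, p[0] | MY_CMD_VERB);
++ *	... decode p via code_generic_verb / code_generic_rslt ...
++ */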
++
++/* ------------ */
++/* qb_attr_code */
++/* ------------ */
++
++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
++ * is either serving as a configuration command or a query result. The
++ * representation is inherently little-endian, as the indexing of the words is
++ * itself little-endian in nature and layerscape is little endian for anything
++ * that crosses a word boundary too (64-bit fields are the obvious examples).
++ */
++struct qb_attr_code {
++ unsigned int word; /* which uint32_t[] array member encodes the field */
++ unsigned int lsoffset; /* encoding offset from ls-bit */
++ unsigned int width; /* encoding width. (bool must be 1.) */
++};
++
++/* Some pre-defined codes */
++extern struct qb_attr_code code_generic_verb;
++extern struct qb_attr_code code_generic_rslt;
++
++/* Macros to define codes */
++#define QB_CODE(a, b, c) { a, b, c }
++#define QB_CODE_NULL \
++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
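++
++/* For illustration (not a real field): QB_CODE(1, 8, 4) locates a 4-bit
++ * field at bits 11:8 of the second 32-bit word of a cacheline, ie. a mask
++ * of 0x00000f00 in cacheline[1]. */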
++
++/* Rotate a code "ms", meaning that it moves from less-significant bytes to
++ * more-significant, from less-significant words to more-significant, etc. The
++ * "ls" version does the inverse, from more-significant towards
++ * less-significant.
++ */
++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ code->lsoffset += bits;
++ while (code->lsoffset > 31) {
++ code->word++;
++ code->lsoffset -= 32;
++ }
++}
++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ /* Don't be fooled, this trick should work because the types are
++ * unsigned. So the case that interests the while loop (the rotate has
++ * gone too far and the word count needs to compensate for it), is
++ * manifested when lsoffset is negative. But that equates to a really
++ * large unsigned value, starting with lots of "F"s. As such, we can
++ * continue adding 32 back to it until it wraps back round above zero,
++ * to a value of 31 or less...
++ */
++ code->lsoffset -= bits;
++ while (code->lsoffset > 31) {
++ code->word--;
++ code->lsoffset += 32;
++ }
++}
++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
++#define qb_attr_code_for_ms(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ms(code, bits))
++#define qb_attr_code_for_ls(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ls(code, bits))
++
++/* decode a field from a cacheline */
++static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
++ const uint32_t *cacheline)
++{
++ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
++}
++static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,
++ const uint64_t *cacheline)
++{
++ uint64_t res;
++
++ u64_from_le32_copy(&res, &cacheline[code->word/2], 1);
++ return res;
++}
++
++/* encode a field to a cacheline */
++static inline void qb_attr_code_encode(const struct qb_attr_code *code,
++ uint32_t *cacheline, uint32_t val)
++{
++ cacheline[code->word] =
++ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
++ | e32_uint32_t(code->lsoffset, code->width, val);
++}
++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
++ uint64_t *cacheline, uint64_t val)
++{
++ u64_to_le32_copy(&cacheline[code->word/2], &val, 1);
++}
++
++/* Small-width signed values (two's-complement) will decode into medium-width
++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
++ * 249. Likewise -120 would decode as 136.) This function allows the caller to
++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
++ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).
++ */
++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
++ uint32_t val)
++{
++ BUG_ON(val >= (1 << code->width));
++ /* If the high bit was set, it was encoding a negative */
++ if (val >= (1 << (code->width - 1)))
++ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -
++ val);
++ /* Otherwise, it was encoding a positive */
++ return (int32_t)val;
++}
++
++/* ---------------------- */
++/* Descriptors/cachelines */
++/* ---------------------- */
++
++/* To avoid needless dynamic allocation, the driver API often gives the caller
++ * a "descriptor" type that the caller can instantiate however they like.
++ * Ultimately though, it is just a cacheline of binary storage (or something
++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
++ * holding pre-formatted pieces of hardware commands. The performance-critical
++ * code can then copy these descriptors directly into hardware command
++ * registers more efficiently than trying to construct/format commands
++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
++ * order for the compiler to know its size, but the internal details are not
++ * exposed. The following macro is used within the driver for converting *any*
++ * descriptor pointer to a usable array pointer. The use of a macro (instead of
++ * an inline) is necessary to work with different descriptor types and to work
++ * correctly with const and non-const inputs (and similarly-qualified outputs).
++ */
++#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
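++
++/* Hypothetical usage sketch: this is how the portal code views a descriptor
++ * as its backing word array for encoding, eg. (mirroring qbman_portal.c):
++ *
++ *	struct qbman_release_desc d;
++ *	uint32_t *cl = qb_cl(&d);
++ *
++ *	qb_attr_code_encode(&code_release_bpid, cl, bpid);
++ */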
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+@@ -0,0 +1,173 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++*/
++
++/* Perform extra checking */
++#define QBMAN_CHECKING
++
++/* To maximise the amount of logic that is common between the Linux driver and
++ * other targets (such as the embedded MC firmware), we pivot here between the
++ * inclusion of two platform-specific headers.
++ *
++ * The first, qbman_sys_decl.h, includes any and all required system headers as
++ * well as providing any definitions for the purposes of compatibility. The
++ * second, qbman_sys.h, is where platform-specific routines go.
++ *
++ * The point of the split is that the platform-independent code (including this
++ * header) may depend on platform-specific declarations, yet other
++ * platform-specific routines may depend on platform-independent definitions.
++ */
++
++#include "qbman_sys_decl.h"
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++
++/* When things go wrong, it is a convenient trick to insert a few FOO()
++ * statements in the code to trace progress. TODO: remove this once we are
++ * hacking the code less actively.
++ */
++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
++
++/* Any time there is a register interface which we poll on, this provides a
++ * "break after x iterations" scheme for it. It's handy for debugging, eg.
++ * where you don't want millions of lines of log output from a polling loop
++ * that won't terminate, because such things tend to drown out the earlier
++ * log output that might explain what caused the problem. (NB: put ";" after
++ * each macro!)
++ * TODO: we should probably remove this once we're done sanitising the
++ * simulator...
++ */
++#define DBG_POLL_START(loopvar) (loopvar = 10)
++#define DBG_POLL_CHECK(loopvar) \
++ do { if (!(loopvar--)) BUG_ON(1); } while (0)
++
++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
++ * and widths, these macro-generated encode/decode/isolate/remove inlines can
++ * be used.
++ *
++ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
++ * where the field is located 3 bits "up" from the least-significant bit of the
++ * register (ie. the field location within the 32-bit register corresponds to a
++ * mask of 0x0001fff8), you would do;
++ * uint16_t field = d32_uint16_t(3, 14, reg_value);
++ *
++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
++ * LS bit), do;
++ * reg_value |= e32_int(19, 1, !!field);
++ *
++ * If you wish to read-modify-write a register, such that you leave the 14-bit
++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
++ * value using;
++ * reg_value = i32_uint16_t(3, 14, reg_value);
++ *
++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
++ * zero) but leaving all other fields as-is;
++ * reg_val = r32_int(19, 1, reg_value);
++ *
++ */
++#define MAKE_MASK32(width) ((width) == 32 ? 0xffffffff : \
++ (uint32_t)((1 << (width)) - 1))
++#define DECLARE_CODEC32(t) \
++static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
++} \
++static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
++} \
++static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
++ uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
++} \
++static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
++ uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return ~(MAKE_MASK32(width) << lsoffset) & val; \
++}
++DECLARE_CODEC32(uint32_t)
++DECLARE_CODEC32(uint16_t)
++DECLARE_CODEC32(uint8_t)
++DECLARE_CODEC32(int)
++
++ /*********************/
++ /* Debugging assists */
++ /*********************/
++
++static inline void __hexdump(unsigned long start, unsigned long end,
++ unsigned long p, size_t sz, const unsigned char *c)
++{
++ while (start < end) {
++ unsigned int pos = 0;
++ char buf[64];
++ int nl = 0;
++
++ pos += sprintf(buf + pos, "%08lx: ", start);
++ do {
++ if ((start < p) || (start >= (p + sz)))
++ pos += sprintf(buf + pos, "..");
++ else
++ pos += sprintf(buf + pos, "%02x", *(c++));
++ if (!(++start & 15)) {
++ buf[pos++] = '\n';
++ nl = 1;
++ } else {
++ nl = 0;
++ if (!(start & 1))
++ buf[pos++] = ' ';
++ if (!(start & 3))
++ buf[pos++] = ' ';
++ }
++ } while (start & 15);
++ if (!nl)
++ buf[pos++] = '\n';
++ buf[pos] = '\0';
++ pr_info("%s", buf);
++ }
++}
++static inline void hexdump(const void *ptr, size_t sz)
++{
++ unsigned long p = (unsigned long)ptr;
++ unsigned long start = p & ~(unsigned long)15;
++ unsigned long end = (p + sz + 15) & ~(unsigned long)15;
++ const unsigned char *c = ptr;
++
++ __hexdump(start, end, p, sz, c);
++}
++
++#include "qbman_sys.h"
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h
+@@ -0,0 +1,307 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
++ * driver. They are only included via qbman_private.h, which is itself a
++ * platform-independent file and is included by all the other driver source.
++ *
++ * qbman_sys_decl.h is included prior to all other declarations and logic, and
++ * it exists to provide compatibility with any linux interfaces our
++ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
++ * provides linux compatibility.
++ *
++ * This qbman_sys.h header, on the other hand, is included *after* any common
++ * and platform-neutral declarations and logic in qbman_private.h, and exists to
++ * implement any platform-specific logic of the qbman driver itself. Ie. it is
++ * *not* to provide linux compatibility.
++ */
++
++/* Trace the 3 different classes of read/write access to QBMan. #undef as
++ * required. */
++#undef QBMAN_CCSR_TRACE
++#undef QBMAN_CINH_TRACE
++#undef QBMAN_CENA_TRACE
++
++static inline void word_copy(void *d, const void *s, unsigned int cnt)
++{
++ uint32_t *dd = d;
++ const uint32_t *ss = s;
++
++ while (cnt--)
++ *(dd++) = *(ss++);
++}
++
++/* Currently, the CENA support code expects each 32-bit word to be written in
++ * host order, and these are converted to hardware (little-endian) order on
++ * command submission. However, 64-bit quantities must be written (and read)
++ * as two 32-bit words with the least-significant word first, irrespective of
++ * host endianness. */
++static inline void u64_to_le32_copy(void *d, const uint64_t *s,
++ unsigned int cnt)
++{
++ uint32_t *dd = d;
++ const uint32_t *ss = (const uint32_t *)s;
++
++ while (cnt--) {
++ /* TBD: the toolchain was choking on the use of 64-bit types up
++ * until recently so this works entirely with 32-bit variables.
++ * When 64-bit types become usable again, investigate better
++ * ways of doing this. */
++#if defined(__BIG_ENDIAN)
++ *(dd++) = ss[1];
++ *(dd++) = ss[0];
++ ss += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++static inline void u64_from_le32_copy(uint64_t *d, const void *s,
++ unsigned int cnt)
++{
++ const uint32_t *ss = s;
++ uint32_t *dd = (uint32_t *)d;
++
++ while (cnt--) {
++#if defined(__BIG_ENDIAN)
++ dd[1] = *(ss++);
++ dd[0] = *(ss++);
++ dd += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++/* Convert a host-native 32bit value into little endian */
++#if defined(__BIG_ENDIAN)
++static inline uint32_t make_le32(uint32_t val)
++{
++ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
++ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
++}
++static inline uint32_t make_le24(uint32_t val)
++{
++ return (((val & 0xff) << 16) | (val & 0xff00) |
++ ((val & 0xff0000) >> 16));
++}
++#else
++#define make_le32(val) (val)
++#define make_le24(val) (val)
++#endif
++static inline void make_le32_n(uint32_t *val, unsigned int num)
++{
++ while (num--) {
++ *val = make_le32(*val);
++ val++;
++ }
++}
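++
++/* Hypothetical usage sketch: a 64-bit value round-trips through the LE
++ * helpers unchanged on either endianness, with the least-significant 32-bit
++ * word always stored first:
++ *
++ *	uint64_t in = 0x0123456789abcdefull, out;
++ *	uint32_t buf[2];
++ *
++ *	u64_to_le32_copy(buf, &in, 1);
++ *	u64_from_le32_copy(&out, buf, 1);
++ *	... out == in ...
++ */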
++
++ /******************/
++ /* Portal access */
++ /******************/
++struct qbman_swp_sys {
++ /* On GPP, the sys support for qbman_swp is here. The CENA region is
++ * not an mmap() of the real portal registers, but an allocated
++ * place-holder, because the actual writes/reads to/from the portal are
++ * marshalled from these allocated areas using QBMan's "MC access
++ * registers". CINH accesses are atomic so there's no need for a
++ * place-holder. */
++ void *cena;
++ void __iomem *addr_cena;
++ void __iomem *addr_cinh;
++};
++
++/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
++ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
++ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
++ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
++ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
++ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
++ */
++
++static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
++ uint32_t val)
++{
++ writel_relaxed(val, s->addr_cinh + offset);
++#ifdef QBMAN_CINH_TRACE
++ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n",
++ s->addr_cinh, offset, val);
++#endif
++}
++
++static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
++{
++ uint32_t reg = readl_relaxed(s->addr_cinh + offset);
++
++#ifdef QBMAN_CINH_TRACE
++ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n",
++ s->addr_cinh, offset, reg);
++#endif
++ return reg;
++}
++
++static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ void *shadow = s->cena + offset;
++
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n",
++ s->addr_cena, offset, shadow);
++#endif
++ BUG_ON(offset & 63);
++ dcbz(shadow);
++ return shadow;
++}
++
++static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
++ uint32_t offset, void *cmd)
++{
++ const uint32_t *shadow = cmd;
++ int loop;
++
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n",
++ s->addr_cena, offset, shadow);
++ hexdump(cmd, 64);
++#endif
++ for (loop = 15; loop >= 1; loop--)
++ writel_relaxed(shadow[loop], s->addr_cena +
++ offset + loop * 4);
++ lwsync();
++ writel_relaxed(shadow[0], s->addr_cena + offset);
++ dcbf(s->addr_cena + offset);
++}
++
++static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
++{
++ uint32_t *shadow = s->cena + offset;
++ unsigned int loop;
++
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_read(%p:0x%03x) %p\n",
++ s->addr_cena, offset, shadow);
++#endif
++
++ for (loop = 0; loop < 16; loop++)
++ shadow[loop] = readl_relaxed(s->addr_cena + offset
++ + loop * 4);
++#ifdef QBMAN_CENA_TRACE
++ hexdump(shadow, 64);
++#endif
++ return shadow;
++}
++
++static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ dcivac(s->addr_cena + offset);
++ prefetch_for_load(s->addr_cena + offset);
++}
++
++ /******************/
++ /* Portal support */
++ /******************/
++
++/* The SWP_CFG portal register is special, in that it is used by the
++ * platform-specific code rather than the platform-independent code in
++ * qbman_portal.c. So use of it is declared locally here. */
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++/* For MC portal use, we always configure with
++ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
++ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0)
++ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
++ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
++ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3)
++ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE)
++ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
++ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0)
++ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
++ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0)
++ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE)
++ */
++static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
++ uint8_t est, uint8_t rpm, uint8_t dcm,
++ uint8_t epm, int sd, int sp, int se,
++ int dp, int de, int ep)
++{
++ uint32_t reg;
++
++ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |
++ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) |
++ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) |
++ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) |
++ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) |
++ e32_uint8_t(14, 1, wn);
++ return reg;
++}
++
++static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
++ const struct qbman_swp_desc *d,
++ uint8_t dqrr_size)
++{
++ uint32_t reg;
++
++ s->addr_cena = d->cena_bar;
++ s->addr_cinh = d->cinh_bar;
++ s->cena = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!s->cena) {
++ pr_err("Could not allocate page for cena shadow\n");
++ return -1;
++ }
++
++#ifdef QBMAN_CHECKING
++ /* We should never be asked to initialise for a portal that isn't in
++ * the power-on state. (Ie. don't forget to reset portals when they are
++ * decommissioned!)
++ */
++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
++ BUG_ON(reg);
++#endif
++ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0);
++ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
++ if (!reg) {
++ pr_err("The portal is not enabled!\n");
++ free_page((unsigned long)s->cena);
++ return -1;
++ }
++ return 0;
++}
++
++static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
++{
++ free_page((unsigned long)s->cena);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h
+@@ -0,0 +1,86 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/dma-mapping.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/memblock.h>
++#include <linux/completion.h>
++#include <linux/log2.h>
++#include <linux/types.h>
++#include <linux/ioctl.h>
++#include <linux/device.h>
++#include <linux/smp.h>
++#include <linux/vmalloc.h>
++#include "fsl_qbman_base.h"
++
++/* The platform-independent code shouldn't need endianness, except for
++ * weird/fast-path cases like qbman_result_has_token(), which needs to
++ * perform a passive and endianness-specific test on a read-only data structure
++ * very quickly. It's an exception, and this symbol is used for that case. */
++#if defined(__BIG_ENDIAN)
++#define DQRR_TOK_OFFSET 0
++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24
++#define SCN_STATE_OFFSET_IN_MEM 8
++#define SCN_RID_OFFSET_IN_MEM 8
++#else
++#define DQRR_TOK_OFFSET 24
++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0
++#define SCN_STATE_OFFSET_IN_MEM 16
++#define SCN_RID_OFFSET_IN_MEM 0
++#endif
++
++/* Similarly-named functions */
++#define upper32(a) upper_32_bits(a)
++#define lower32(a) lower_32_bits(a)
++
++ /****************/
++ /* arch assists */
++ /****************/
++
++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
++#define lwsync() { asm volatile("dmb st" : : : "memory"); }
++#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++static inline void prefetch_for_load(void *p)
++{
++ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));
++}
++static inline void prefetch_for_store(void *p)
++{
++ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p));
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c
+@@ -0,0 +1,664 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/module.h>
++
++#include "qbman_private.h"
++#include "fsl_qbman_portal.h"
++#include "qbman_debug.h"
++#include "../../include/fsl_dpaa2_fd.h"
++
++#define QBMAN_SWP_CENA_BASE 0x818000000
++#define QBMAN_SWP_CINH_BASE 0x81c000000
++
++#define QBMAN_PORTAL_IDX 2
++#define QBMAN_TEST_FQID 19
++#define QBMAN_TEST_BPID 23
++#define QBMAN_USE_QD
++#ifdef QBMAN_USE_QD
++#define QBMAN_TEST_QDID 1
++#endif
++#define QBMAN_TEST_LFQID 0xf00010
++
++#define NUM_EQ_FRAME 10
++#define NUM_DQ_FRAME 10
++#define NUM_DQ_IN_DQRR 5
++#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR)
++
++static struct qbman_swp *swp;
++static struct qbman_eq_desc eqdesc;
++static struct qbman_pull_desc pulldesc;
++static struct qbman_release_desc releasedesc;
++static struct qbman_eq_response eq_storage[1];
++static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64);
++static dma_addr_t eq_storage_phys;
++static dma_addr_t dq_storage_phys;
++
++/* FQ ctx attribute values for the test code. */
++#define FQCTX_HI 0xabbaf00d
++#define FQCTX_LO 0x98765432
++#define FQ_VFQID 0x123456
++
++/* Sample frame descriptor */
++static struct qbman_fd_simple fd = {
++ .addr_lo = 0xbabaf33d,
++ .addr_hi = 0x01234567,
++ .len = 0x7777,
++ .frc = 0xdeadbeef,
++ .flc_lo = 0xcafecafe,
++ .flc_hi = 0xbeadabba
++};
++
++static void fd_inc(struct qbman_fd_simple *_fd)
++{
++ _fd->addr_lo += _fd->len;
++ _fd->flc_lo += 0x100;
++ _fd->frc += 0x10;
++}
++
++static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb)
++{
++ int i;
++
++ for (i = 0; i < 8; i++)
++ if (fda->words[i] - fdb->words[i])
++ return 1;
++ return 0;
++}
++
++struct qbman_fd fd_eq[NUM_EQ_FRAME];
++struct qbman_fd fd_dq[NUM_DQ_FRAME];
++
++/* "Buffers" to be released (and storage for buffers to be acquired) */
++static uint64_t rbufs[320];
++static uint64_t abufs[320];
++
++static void do_enqueue(struct qbman_swp *swp)
++{
++ int i, j, ret;
++
++#ifdef QBMAN_USE_QD
++ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n",
++ NUM_EQ_FRAME, QBMAN_TEST_QDID);
++#else
++ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n",
++ NUM_EQ_FRAME, QBMAN_TEST_FQID);
++#endif
++ for (i = 0; i < NUM_EQ_FRAME; i++) {
++ /*********************************/
++ /* Prepare an enqueue descriptor */
++ /*********************************/
++ memset(eq_storage, 0, sizeof(eq_storage));
++ eq_storage_phys = virt_to_phys(eq_storage);
++ qbman_eq_desc_clear(&eqdesc);
++ qbman_eq_desc_set_no_orp(&eqdesc, 0);
++ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0);
++ qbman_eq_desc_set_token(&eqdesc, 0x99);
++#ifdef QBMAN_USE_QD
++ /**********************************/
++ /* Prepare a Queueing Destination */
++ /**********************************/
++ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3);
++#else
++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID);
++#endif
++
++ /******************/
++ /* Try an enqueue */
++ /******************/
++ ret = qbman_swp_enqueue(swp, &eqdesc,
++ (const struct qbman_fd *)&fd);
++ BUG_ON(ret);
++ for (j = 0; j < 8; j++)
++ fd_eq[i].words[j] = *((uint32_t *)&fd + j);
++ fd_inc(&fd);
++ }
++}
++
++static void do_push_dequeue(struct qbman_swp *swp)
++{
++ int i, j;
++ const struct dpaa2_dq *dq_storage1;
++ const struct qbman_fd *__fd;
++ int loopvar;
++
++ pr_info("*****QBMan_test: Start push dequeue\n");
++ for (i = 0; i < NUM_DQ_FRAME; i++) {
++ DBG_POLL_START(loopvar);
++ do {
++ DBG_POLL_CHECK(loopvar);
++ dq_storage1 = qbman_swp_dqrr_next(swp);
++ } while (!dq_storage1);
++ if (dq_storage1) {
++ __fd = (const struct qbman_fd *)
++ dpaa2_dq_fd(dq_storage1);
++ for (j = 0; j < 8; j++)
++ fd_dq[i].words[j] = __fd->words[j];
++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) {
++ pr_info("enqueue FD is\n");
++ hexdump(&fd_eq[i], 32);
++ pr_info("dequeue FD is\n");
++ hexdump(&fd_dq[i], 32);
++ }
++ qbman_swp_dqrr_consume(swp, dq_storage1);
++ } else {
++ pr_info("The push dequeue fails\n");
++ }
++ }
++}
++
++static void do_pull_dequeue(struct qbman_swp *swp)
++{
++ int i, j, ret;
++ const struct dpaa2_dq *dq_storage1;
++ const struct qbman_fd *__fd;
++ int loopvar;
++
++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n",
++ NUM_DQ_IN_DQRR);
++ for (i = 0; i < NUM_DQ_IN_DQRR; i++) {
++ qbman_pull_desc_clear(&pulldesc);
++ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0);
++ qbman_pull_desc_set_numframes(&pulldesc, 1);
++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID);
++
++ ret = qbman_swp_pull(swp, &pulldesc);
++ BUG_ON(ret);
++ DBG_POLL_START(loopvar);
++ do {
++ DBG_POLL_CHECK(loopvar);
++ dq_storage1 = qbman_swp_dqrr_next(swp);
++ } while (!dq_storage1);
++
++ if (dq_storage1) {
++ __fd = (const struct qbman_fd *)
++ dpaa2_dq_fd(dq_storage1);
++ for (j = 0; j < 8; j++)
++ fd_dq[i].words[j] = __fd->words[j];
++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) {
++ pr_info("enqueue FD is\n");
++ hexdump(&fd_eq[i], 32);
++ pr_info("dequeue FD is\n");
++ hexdump(&fd_dq[i], 32);
++ }
++ qbman_swp_dqrr_consume(swp, dq_storage1);
++ } else {
++ pr_info("Dequeue with dq entry in DQRR fails\n");
++ }
++ }
++
++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n",
++ NUM_DQ_IN_MEM);
++ for (i = 0; i < NUM_DQ_IN_MEM; i++) {
++ dq_storage_phys = virt_to_phys(&dq_storage[i]);
++ qbman_pull_desc_clear(&pulldesc);
++ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i],
++ dq_storage_phys, 1);
++ qbman_pull_desc_set_numframes(&pulldesc, 1);
++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID);
++ ret = qbman_swp_pull(swp, &pulldesc);
++ BUG_ON(ret);
++
++ DBG_POLL_START(loopvar);
++ do {
++ DBG_POLL_CHECK(loopvar);
++ ret = qbman_result_has_new_result(swp,
++ &dq_storage[i]);
++ } while (!ret);
++
++ if (ret) {
++ for (j = 0; j < 8; j++)
++ fd_dq[i + NUM_DQ_IN_DQRR].words[j] =
++ dq_storage[i].dont_manipulate_directly[j + 8];
++ j = i + NUM_DQ_IN_DQRR;
++ if (fd_cmp(&fd_eq[j], &fd_dq[j])) {
++ pr_info("enqueue FD is\n");
++ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32);
++ pr_info("dequeue FD is\n");
++ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32);
++ hexdump(&dq_storage[i], 64);
++ }
++ } else {
++ pr_info("Dequeue with dq entry in memory fails\n");
++ }
++ }
++}
++
++static void release_buffer(struct qbman_swp *swp, unsigned int num)
++{
++ int ret;
++ unsigned int i, j;
++
++ qbman_release_desc_clear(&releasedesc);
++ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID);
++ pr_info("*****QBMan_test: Release %d buffers to BP %d\n",
++ num, QBMAN_TEST_BPID);
++ for (i = 0; i < (num / 7 + 1); i++) {
++ j = ((num - i * 7) > 7) ? 7 : (num - i * 7);
++ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j);
++ BUG_ON(ret);
++ }
++}
++
++static void acquire_buffer(struct qbman_swp *swp, unsigned int num)
++{
++ int ret;
++ unsigned int i, j;
++
++ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n",
++ num, QBMAN_TEST_BPID);
++
++ for (i = 0; i < (num / 7 + 1); i++) {
++ j = ((num - i * 7) > 7) ? 7 : (num - i * 7);
++ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j);
++ BUG_ON(ret != j);
++ }
++}
++
++static void buffer_pool_test(struct qbman_swp *swp)
++{
++ struct qbman_attr info;
++ struct dpaa2_dq *bpscn_message;
++ dma_addr_t bpscn_phys;
++ uint64_t bpscn_ctx;
++ uint64_t ctx = 0xbbccddaadeadbeefull;
++ int i, ret;
++ uint32_t hw_targ;
++
++ pr_info("*****QBMan_test: test buffer pool management\n");
++ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info);
++ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys);
++ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys,
++ virt_to_phys(&info));
++ bpscn_message = phys_to_virt(bpscn_phys);
++
++ for (i = 0; i < 320; i++)
++ rbufs[i] = 0xf00dabba01234567ull + i * 0x40;
++
++ release_buffer(swp, 320);
++
++ pr_info("QBMan_test: query the buffer pool\n");
++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info);
++ hexdump(&info, 64);
++ qbman_bp_attr_get_hw_targ(&info, &hw_targ);
++ pr_info("hw_targ is %d\n", hw_targ);
++
++ /* Acquire buffers to trigger BPSCN */
++ acquire_buffer(swp, 300);
++ /* BPSCN should be written to the memory */
++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info);
++ hexdump(&info, 64);
++ hexdump(bpscn_message, 64);
++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message));
++ /* There should be free buffers in the pool */
++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message)));
++ /* Buffer pool is depleted */
++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message));
++ /* The ctx should match */
++ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message);
++ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx);
++ BUG_ON(ctx != bpscn_ctx);
++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq));
++
++ /* Re-seed the buffer pool to trigger BPSCN */
++ release_buffer(swp, 240);
++ /* BPSCN should be written to the memory */
++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message));
++ /* There should be free buffers in the pool */
++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message)));
++ /* Buffer pool is not depleted */
++ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message));
++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq));
++
++ acquire_buffer(swp, 260);
++ /* BPSCN should be written to the memory */
++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message));
++ /* There should be free buffers in the pool when the BPSCN was generated */
++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message)));
++ /* Buffer pool is depleted */
++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message));
++}
++
++static void ceetm_test(struct qbman_swp *swp)
++{
++ int i, j, ret;
++
++ qbman_eq_desc_clear(&eqdesc);
++ qbman_eq_desc_set_no_orp(&eqdesc, 0);
++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID);
++ pr_info("*****QBMan_test: Enqueue to LFQID %x\n",
++ QBMAN_TEST_LFQID);
++ for (i = 0; i < NUM_EQ_FRAME; i++) {
++ ret = qbman_swp_enqueue(swp, &eqdesc,
++ (const struct qbman_fd *)&fd);
++ BUG_ON(ret);
++ for (j = 0; j < 8; j++)
++ fd_eq[i].words[j] = *((uint32_t *)&fd + j);
++ fd_inc(&fd);
++ }
++}
++
++int qbman_test(void)
++{
++ struct qbman_swp_desc pd;
++ uint32_t reg;
++
++ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE +
++ QBMAN_PORTAL_IDX * 0x10000, 0x10000);
++ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE +
++ QBMAN_PORTAL_IDX * 0x10000, 0x10000);
++
++ /* Detect whether the mc image is the test image with GPP setup */
++ reg = readl_relaxed(pd.cena_bar + 0x4);
++ if (reg != 0xdeadbeef) {
++ pr_err("The MC image doesn't have GPP test setup, stop!\n");
++ iounmap(pd.cena_bar);
++ iounmap(pd.cinh_bar);
++ return -1;
++ }
++
++ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX);
++ swp = qbman_swp_init(&pd);
++ if (!swp) {
++ iounmap(pd.cena_bar);
++ iounmap(pd.cinh_bar);
++ return -1;
++ }
++
++ /*******************/
++ /* Enqueue frames */
++ /*******************/
++ do_enqueue(swp);
++
++ /*******************/
++ /* Do pull dequeue */
++ /*******************/
++ do_pull_dequeue(swp);
++
++ /*******************/
++ /* Enqueue frames */
++ /*******************/
++ qbman_swp_push_set(swp, 0, 1);
++ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID);
++ do_enqueue(swp);
++
++ /*******************/
++ /* Do push dequeue */
++ /*******************/
++ do_push_dequeue(swp);
++
++ /**************************/
++ /* Test buffer pool funcs */
++ /**************************/
++ buffer_pool_test(swp);
++
++ /******************/
++ /* CEETM test */
++ /******************/
++ ceetm_test(swp);
++
++ qbman_swp_finish(swp);
++ pr_info("*****QBMan_test: Kernel test Passed\n");
++ return 0;
++}
++
++/* user-space test-case, definitions:
++ *
++ * 1 portal only, using portal index 3.
++ */
++
++#include <linux/uaccess.h>
++#include <linux/ioctl.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++
++#define QBMAN_TEST_US_SWP 3 /* portal index for user space */
++
++#define QBMAN_TEST_MAGIC 'q'
++struct qbman_test_swp_ioctl {
++ unsigned long portal1_cinh;
++ unsigned long portal1_cena;
++};
++struct qbman_test_dma_ioctl {
++ unsigned long ptr;
++ uint64_t phys_addr;
++};
++
++struct qbman_test_priv {
++ int has_swp_map;
++ int has_dma_map;
++ unsigned long pgoff;
++};
++
++#define QBMAN_TEST_SWP_MAP \
++ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl)
++#define QBMAN_TEST_SWP_UNMAP \
++ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl)
++#define QBMAN_TEST_DMA_MAP \
++ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl)
++#define QBMAN_TEST_DMA_UNMAP \
++ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl)
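++
++/* Hypothetical user-space usage sketch (the device node name follows the
++ * miscdevice registered at the bottom of this file):
++ *
++ *	int fd = open("/dev/qbman-test", O_RDWR);
++ *	struct qbman_test_swp_ioctl swp_map;
++ *
++ *	ioctl(fd, QBMAN_TEST_SWP_MAP, &swp_map);
++ *	... portal1_cena/portal1_cinh now hold mapped portal addresses ...
++ *	ioctl(fd, QBMAN_TEST_SWP_UNMAP, &swp_map);
++ */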
++
++#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \
++ 0x10000) >> PAGE_SHIFT)
++#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \
++ 0x10000) >> PAGE_SHIFT)
++
++static int qbman_test_open(struct inode *inode, struct file *filp)
++{
++ struct qbman_test_priv *priv;
++
++ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++ filp->private_data = priv;
++ priv->has_swp_map = 0;
++ priv->has_dma_map = 0;
++ priv->pgoff = 0;
++ return 0;
++}
++
++static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ int ret;
++ struct qbman_test_priv *priv = filp->private_data;
++
++ BUG_ON(!priv);
++
++ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF)
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF)
++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
++ else if (vma->vm_pgoff == priv->pgoff)
++ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
++ else {
++ pr_err("Damn, unrecognised pg_off!!\n");
++ return -EINVAL;
++ }
++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ return ret;
++}
++
++static long qbman_test_ioctl(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++ void __user *a = (void __user *)arg;
++ unsigned long longret, populate;
++ int ret = 0;
++ struct qbman_test_priv *priv = fp->private_data;
++
++ BUG_ON(!priv);
++
++ switch (cmd) {
++ case QBMAN_TEST_SWP_MAP:
++ {
++ struct qbman_test_swp_ioctl params;
++
++ if (priv->has_swp_map)
++ return -EINVAL;
++		down_write(&current->mm->mmap_sem);
++ /* Map portal1 CINH */
++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000,
++ PROT_READ | PROT_WRITE, MAP_SHARED,
++ TEST_PORTAL1_CINH_PGOFF, &populate);
++ if (longret & ~PAGE_MASK) {
++ ret = (int)longret;
++ goto out;
++ }
++ params.portal1_cinh = longret;
++ /* Map portal1 CENA */
++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000,
++ PROT_READ | PROT_WRITE, MAP_SHARED,
++ TEST_PORTAL1_CENA_PGOFF, &populate);
++ if (longret & ~PAGE_MASK) {
++ ret = (int)longret;
++ goto out;
++ }
++ params.portal1_cena = longret;
++ priv->has_swp_map = 1;
++out:
++		up_write(&current->mm->mmap_sem);
++		if (!ret && copy_to_user(a, &params, sizeof(params)))
++ return -EFAULT;
++ return ret;
++ }
++ case QBMAN_TEST_SWP_UNMAP:
++ {
++ struct qbman_test_swp_ioctl params;
++
++ if (!priv->has_swp_map)
++ return -EINVAL;
++
++		if (copy_from_user(&params, a, sizeof(params)))
++ return -EFAULT;
++		down_write(&current->mm->mmap_sem);
++		do_munmap(current->mm, params.portal1_cena, 0x10000);
++		do_munmap(current->mm, params.portal1_cinh, 0x10000);
++		up_write(&current->mm->mmap_sem);
++ priv->has_swp_map = 0;
++ return 0;
++ }
++ case QBMAN_TEST_DMA_MAP:
++ {
++ struct qbman_test_dma_ioctl params;
++ void *vaddr;
++
++ if (priv->has_dma_map)
++ return -EINVAL;
++		vaddr = (void *)get_zeroed_page(GFP_KERNEL);
++		if (!vaddr)
++			return -ENOMEM;
++		params.phys_addr = virt_to_phys(vaddr);
++		priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT;
++		down_write(&current->mm->mmap_sem);
++		longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE,
++					PROT_READ | PROT_WRITE, MAP_SHARED,
++					priv->pgoff, &populate);
++		if (longret & ~PAGE_MASK) {
++			/* don't leak the semaphore or the page on failure */
++			up_write(&current->mm->mmap_sem);
++			free_page((unsigned long)vaddr);
++			return (int)longret;
++		}
++		params.ptr = longret;
++		priv->has_dma_map = 1;
++		up_write(&current->mm->mmap_sem);
++		if (copy_to_user(a, &params, sizeof(params)))
++ return -EFAULT;
++ return 0;
++ }
++ case QBMAN_TEST_DMA_UNMAP:
++ {
++ struct qbman_test_dma_ioctl params;
++
++ if (!priv->has_dma_map)
++ return -EINVAL;
++		if (copy_from_user(&params, a, sizeof(params)))
++ return -EFAULT;
++		down_write(&current->mm->mmap_sem);
++		do_munmap(current->mm, params.ptr, PAGE_SIZE);
++		up_write(&current->mm->mmap_sem);
++ free_page((unsigned long)phys_to_virt(params.phys_addr));
++ priv->has_dma_map = 0;
++ return 0;
++ }
++ default:
++		pr_err("qbman-test: unsupported ioctl cmd 0x%x\n", cmd);
++ }
++ return -EINVAL;
++}
++
++static const struct file_operations qbman_fops = {
++ .open = qbman_test_open,
++ .mmap = qbman_test_mmap,
++ .unlocked_ioctl = qbman_test_ioctl
++};
++
++static struct miscdevice qbman_miscdev = {
++ .name = "qbman-test",
++ .fops = &qbman_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
++
++static int qbman_miscdev_init;
++
++static int test_init(void)
++{
++ int ret = qbman_test();
++
++ if (!ret) {
++		/* The MC image supports the test cases, so instantiate the
++		 * character device that the user-space test case will use to
++		 * do its memory mappings. */
++ ret = misc_register(&qbman_miscdev);
++ if (ret) {
++ pr_err("qbman-test: failed to register misc device\n");
++ return ret;
++ }
++ pr_info("qbman-test: misc device registered!\n");
++ qbman_miscdev_init = 1;
++ }
++ return 0;
++}
++
++static void test_exit(void)
++{
++ if (qbman_miscdev_init) {
++ misc_deregister(&qbman_miscdev);
++ qbman_miscdev_init = 0;
++ }
++}
++
++module_init(test_init);
++module_exit(test_exit);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h
+@@ -0,0 +1,774 @@
++/* Copyright 2014 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_FD_H
++#define __FSL_DPAA2_FD_H
++
++/**
++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
++ *
++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
++ * Frames can be enqueued to and dequeued from Frame Queues, which are
++ * consumed by the various DPAA accelerators (WRIOP, SEC, PME, DCE).
++ *
++ * There are three types of frames: Single, Scatter Gather and Frame Lists.
++ *
++ * The set of APIs in this file must be used to create, manipulate and
++ * query Frame Descriptors.
++ *
++ */
++
++/**
++ * struct dpaa2_fd - Place-holder for FDs.
++ * @words: for easier/faster copying the whole FD structure.
++ * @addr_lo: the lower 32 bits of the address in FD.
++ * @addr_hi: the upper 32 bits of the address in FD.
++ * @len: the length field in FD.
++ * @bpid_offset: represents the bpid and offset fields in the FD
++ * @frc: frame context
++ * @ctrl: the 32bit control bits including dd, sc,... va, err.
++ * @flc_lo: the lower 32bit of flow context.
++ * @flc_hi: the upper 32bits of flow context.
++ *
++ * This structure represents the basic Frame Descriptor used in the system.
++ * We represent it via the simplest form that we need for now. Different
++ * overlays may be needed to support different options, etc. (It is impractical
++ * to define One True Struct, because the resulting encoding routines (lots of
++ * read-modify-writes) would be worst-case performance whether or not
++ * circumstances required them.)
++ */
++struct dpaa2_fd {
++ union {
++ u32 words[8];
++ struct dpaa2_fd_simple {
++ u32 addr_lo;
++ u32 addr_hi;
++ u32 len;
++ /* offset in the MS 16 bits, BPID in the LS 16 bits */
++ u32 bpid_offset;
++ u32 frc; /* frame context */
++ /* "err", "va", "cbmt", "asal", [...] */
++ u32 ctrl;
++ /* flow context */
++ u32 flc_lo;
++ u32 flc_hi;
++ } simple;
++ };
++};
++
++enum dpaa2_fd_format {
++ dpaa2_fd_single = 0,
++ dpaa2_fd_list,
++ dpaa2_fd_sg
++};
++
++/* Accessors for SG entry fields
++ *
++ * These setters and getters assume little endian format. For converting
++ * between LE and cpu endianness, the specific conversion functions must be
++ * called before the SGE contents are accessed by the core (on Rx), and
++ * before the SG table is sent to hardware (on Tx).
++ */
++
++/**
++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the address in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32)
++ + fd->simple.addr_lo);
++}
++
++/**
++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
++ * @fd: the given frame descriptor.
++ * @addr: the address needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
++{
++ fd->simple.addr_hi = upper_32_bits(addr);
++ fd->simple.addr_lo = lower_32_bits(addr);
++}
++
++/**
++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the frame context field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
++{
++ return fd->simple.frc;
++}
++
++/**
++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
++ * @fd: the given frame descriptor.
++ * @frc: the frame context needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
++{
++ fd->simple.frc = frc;
++}
++
++/**
++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the flow context in the frame descriptor.
++ */
++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
++{
++ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) +
++ fd->simple.flc_lo);
++}
++
++/**
++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
++ * @fd: the given frame descriptor.
++ * @flc_addr: the flow context needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
++{
++ fd->simple.flc_hi = upper_32_bits(flc_addr);
++ fd->simple.flc_lo = lower_32_bits(flc_addr);
++}
++
++/**
++ * dpaa2_fd_get_len() - Get the length in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the length field in the frame descriptor.
++ */
++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
++{
++ return fd->simple.len;
++}
++
++/**
++ * dpaa2_fd_set_len() - Set the length field of frame descriptor
++ * @fd: the given frame descriptor.
++ * @len: the length needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
++{
++ fd->simple.len = len;
++}
++
++/**
++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the offset.
++ */
++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
++{
++ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF;
++}
++
++/**
++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
++ *
++ * @fd: the given frame descriptor.
++ * @offset: the offset needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
++{
++ fd->simple.bpid_offset &= 0xF000FFFF;
++ fd->simple.bpid_offset |= (u32)offset << 16;
++}
++
++/**
++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_fd_format dpaa2_fd_get_format(
++ const struct dpaa2_fd *fd)
++{
++ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3);
++}
++
++/**
++ * dpaa2_fd_set_format() - Set the format field of frame descriptor
++ *
++ * @fd: the given frame descriptor.
++ * @format: the format needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
++ enum dpaa2_fd_format format)
++{
++ fd->simple.bpid_offset &= 0xCFFFFFFF;
++ fd->simple.bpid_offset |= (u32)format << 28;
++}
++
++/**
++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
++ * @fd: the given frame descriptor.
++ *
++ * Return the bpid.
++ */
++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
++{
++ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF);
++}
++
++/**
++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
++ *
++ * @fd: the given frame descriptor.
++ * @bpid: the bpid needs to be set in frame descriptor.
++ */
++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
++{
++ fd->simple.bpid_offset &= 0xFFFF0000;
++ fd->simple.bpid_offset |= (u32)bpid;
++}
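++
++/*
++ * Illustrative sketch, not part of the original API: one plausible way to
++ * build a simple single-buffer FD with the accessors above. The function
++ * name and the zero data offset are assumptions made for this example.
++ */
++static inline struct dpaa2_fd dpaa2_fd_example_single(dma_addr_t buf_iova,
++						      u32 buf_len, u16 bpid)
++{
++	struct dpaa2_fd fd = { .words = { 0 } };
++
++	dpaa2_fd_set_addr(&fd, buf_iova);	/* IOVA of the data buffer */
++	dpaa2_fd_set_len(&fd, buf_len);		/* frame length in bytes */
++	dpaa2_fd_set_offset(&fd, 0);		/* data starts at the buffer */
++	dpaa2_fd_set_bpid(&fd, bpid);		/* owning buffer pool */
++	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
++	return fd;
++}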
++
++/**
++ * struct dpaa2_sg_entry - the scatter-gathering structure
++ * @addr_lo: the lower 32bit of address
++ * @addr_hi: the upper 32bit of address
++ * @len: the length in this sg entry.
++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits.
++ */
++struct dpaa2_sg_entry {
++ u32 addr_lo;
++ u32 addr_hi;
++ u32 len;
++ u32 bpid_offset;
++};
++
++enum dpaa2_sg_format {
++ dpaa2_sg_single = 0,
++ dpaa2_sg_frame_data,
++ dpaa2_sg_sgt_ext
++};
++
++/**
++ * dpaa2_sg_get_addr() - Get the address from SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return the address.
++ */
++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
++{
++ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo);
++}
++
++/**
++ * dpaa2_sg_set_addr() - Set the address in SG entry
++ * @sg: the given scatter-gathering object.
++ * @addr: the address to be set.
++ */
++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
++{
++ sg->addr_hi = upper_32_bits(addr);
++ sg->addr_lo = lower_32_bits(addr);
++}
++
++
++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
++{
++ return (sg->bpid_offset >> 30) & 0x1;
++}
++
++/**
++ * dpaa2_sg_get_len() - Get the length in SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return the length.
++ */
++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
++{
++ if (dpaa2_sg_short_len(sg))
++ return sg->len & 0x1FFFF;
++ return sg->len;
++}
++
++/**
++ * dpaa2_sg_set_len() - Set the length in SG entry
++ * @sg: the given scatter-gathering object.
++ * @len: the length to be set.
++ */
++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
++{
++ sg->len = len;
++}
++
++/**
++ * dpaa2_sg_get_offset() - Get the offset in SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return the offset.
++ */
++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
++{
++ return (u16)(sg->bpid_offset >> 16) & 0x0FFF;
++}
++
++/**
++ * dpaa2_sg_set_offset() - Set the offset in SG entry
++ * @sg: the given scatter-gathering object.
++ * @offset: the offset to be set.
++ */
++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
++ u16 offset)
++{
++ sg->bpid_offset &= 0xF000FFFF;
++ sg->bpid_offset |= (u32)offset << 16;
++}
++
++/**
++ * dpaa2_sg_get_format() - Get the SG format in SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return the format.
++ */
++static inline enum dpaa2_sg_format
++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
++{
++ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3);
++}
++
++/**
++ * dpaa2_sg_set_format() - Set the SG format in SG entry
++ * @sg: the given scatter-gathering object.
++ * @format: the format to be set.
++ */
++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
++ enum dpaa2_sg_format format)
++{
++ sg->bpid_offset &= 0xCFFFFFFF;
++ sg->bpid_offset |= (u32)format << 28;
++}
++
++/**
++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return the bpid.
++ */
++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
++{
++ return (u16)(sg->bpid_offset & 0x3FFF);
++}
++
++/**
++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
++ * @sg: the given scatter-gathering object.
++ * @bpid: the bpid to be set.
++ */
++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
++{
++ sg->bpid_offset &= 0xFFFFC000;
++ sg->bpid_offset |= (u32)bpid;
++}
++
++/**
++ * dpaa2_sg_is_final() - Check final bit in SG entry
++ * @sg: the given scatter-gathering object.
++ *
++ * Return bool.
++ */
++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
++{
++ return !!(sg->bpid_offset >> 31);
++}
++
++/**
++ * dpaa2_sg_set_final() - Set the final bit in SG entry
++ * @sg: the given scatter-gathering object.
++ * @final: the final boolean to be set.
++ */
++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
++{
++ sg->bpid_offset &= 0x7FFFFFFF;
++ sg->bpid_offset |= (u32)final << 31;
++}
++
++/* Endianness conversion helper functions
++ * The accelerator drivers which construct / read scatter gather entries
++ * need to call these in order to account for endianness mismatches between
++ * hardware and cpu
++ */
++#ifdef __BIG_ENDIAN
++/**
++ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu
++ * format to little endian format.
++ * @sg: the given scatter gather entry.
++ */
++static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg)
++{
++ uint32_t *p = (uint32_t *)sg;
++ int i;
++
++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++)
++ cpu_to_le32s(p++);
++}
++
++/**
++ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian
++ * format to native cpu format.
++ * @sg: the given scatter gather entry.
++ */
++static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg)
++{
++ uint32_t *p = (uint32_t *)sg;
++ int i;
++
++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++)
++ le32_to_cpus(p++);
++}
++#else
++#define dpaa2_sg_cpu_to_le(sg)
++#define dpaa2_sg_le_to_cpu(sg)
++#endif /* __BIG_ENDIAN */
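++
++/*
++ * Illustrative sketch, not part of the original API: filling one SG entry
++ * for Tx and converting it to the little-endian format the hardware
++ * expects. Zeroing bpid_offset first matters because the setters only
++ * mask in their own field.
++ */
++static inline void dpaa2_sg_example_fill(struct dpaa2_sg_entry *sge,
++					 dma_addr_t frag_iova, u32 frag_len,
++					 u16 bpid, bool last)
++{
++	sge->bpid_offset = 0;		/* clean slate for the setters */
++	dpaa2_sg_set_addr(sge, frag_iova);
++	dpaa2_sg_set_len(sge, frag_len);
++	dpaa2_sg_set_offset(sge, 0);
++	dpaa2_sg_set_bpid(sge, bpid);
++	dpaa2_sg_set_format(sge, dpaa2_sg_single);
++	dpaa2_sg_set_final(sge, last);
++	dpaa2_sg_cpu_to_le(sge);	/* no-op on little endian cpus */
++}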
++
++
++/**
++ * struct dpaa2_fl_entry - structure for frame list entry.
++ * @addr_lo: the lower 32bit of address
++ * @addr_hi: the upper 32bit of address
++ * @len: the length in this sg entry.
++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits.
++ * @frc: frame context
++ * @ctrl: the 32bit control bits including dd, sc,... va, err.
++ * @flc_lo: the lower 32bit of flow context.
++ * @flc_hi: the upper 32bits of flow context.
++ *
++ * Frame List Entry (FLE)
++ * Identical to dpaa2_fd.simple layout, but some bits are different
++ */
++struct dpaa2_fl_entry {
++ u32 addr_lo;
++ u32 addr_hi;
++ u32 len;
++ u32 bpid_offset;
++ u32 frc;
++ u32 ctrl;
++ u32 flc_lo;
++ u32 flc_hi;
++};
++
++enum dpaa2_fl_format {
++ dpaa2_fl_single = 0,
++ dpaa2_fl_res,
++ dpaa2_fl_sg
++};
++
++/**
++ * dpaa2_fl_get_addr() - Get address in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return address for the get function.
++ */
++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo);
++}
++
++/**
++ * dpaa2_fl_set_addr() - Set the address in the frame list entry
++ * @fle: the given frame list entry.
++ * @addr: the address needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
++ dma_addr_t addr)
++{
++ fle->addr_hi = upper_32_bits(addr);
++ fle->addr_lo = lower_32_bits(addr);
++}
++
++/**
++ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return flow context for the get function.
++ */
++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
++{
++ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo);
++}
++
++/**
++ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry
++ * @fle: the given frame list entry.
++ * @flc_addr: the flow context address needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
++ dma_addr_t flc_addr)
++{
++ fle->flc_hi = upper_32_bits(flc_addr);
++ fle->flc_lo = lower_32_bits(flc_addr);
++}
++
++/**
++ * dpaa2_fl_get_len() - Get the length in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return length for the get function.
++ */
++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
++{
++ return fle->len;
++}
++
++/**
++ * dpaa2_fl_set_len() - Set the length in the frame list entry
++ * @fle: the given frame list entry.
++ * @len: the length needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
++{
++ fle->len = len;
++}
++
++/**
++ * dpaa2_fl_get_offset() - Get the offset in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return offset for the get function.
++ */
++static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
++{
++ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF;
++}
++
++/**
++ * dpaa2_fl_set_offset() - Set the offset in the frame list entry
++ * @fle: the given frame list entry.
++ * @offset: the offset needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle,
++ uint16_t offset)
++{
++ fle->bpid_offset &= 0xF000FFFF;
++ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16;
++}
++
++/**
++ * dpaa2_fl_get_format() - Get the format in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return frame list format for the get function.
++ */
++static inline enum dpaa2_fl_format dpaa2_fl_get_format(
++ const struct dpaa2_fl_entry *fle)
++{
++ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3);
++}
++
++/**
++ * dpaa2_fl_set_format() - Set the format in the frame list entry
++ * @fle: the given frame list entry.
++ * @format: the frame list format needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
++ enum dpaa2_fl_format format)
++{
++ fle->bpid_offset &= 0xCFFFFFFF;
++ fle->bpid_offset |= (u32)(format & 0x3) << 28;
++}
++
++/**
++ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry
++ * @fle: the given frame list entry.
++ *
++ * Return bpid for the get function.
++ */
++static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
++{
++ return (uint16_t)(fle->bpid_offset & 0x3FFF);
++}
++
++/**
++ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry
++ * @fle: the given frame list entry.
++ * @bpid: the buffer pool id needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid)
++{
++ fle->bpid_offset &= 0xFFFFC000;
++ fle->bpid_offset |= (u32)bpid;
++}
++
++/**
++ * dpaa2_fl_is_final() - Check whether the final bit is set in the frame
++ * list entry.
++ * @fle: the given frame list entry.
++ *
++ * Return the final bit setting.
++ */
++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
++{
++ return !!(fle->bpid_offset >> 31);
++}
++
++/**
++ * dpaa2_fl_set_final() - Set the final bit in the frame list entry
++ * @fle: the given frame list entry.
++ * @final: the final bit needs to be set.
++ *
++ */
++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
++{
++ fle->bpid_offset &= 0x7FFFFFFF;
++ fle->bpid_offset |= (u32)final << 31;
++}
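++
++/*
++ * Illustrative sketch, not part of the original API: filling a two-entry
++ * frame list where only the last entry carries the final bit. Assumes
++ * 'fle' points at two zeroed entries (the setters only mask in their own
++ * fields).
++ */
++static inline void dpaa2_fl_example_pair(struct dpaa2_fl_entry *fle,
++					 dma_addr_t iova0, u32 len0,
++					 dma_addr_t iova1, u32 len1)
++{
++	dpaa2_fl_set_addr(&fle[0], iova0);
++	dpaa2_fl_set_len(&fle[0], len0);
++	dpaa2_fl_set_format(&fle[0], dpaa2_fl_single);
++	dpaa2_fl_set_final(&fle[0], false);
++
++	dpaa2_fl_set_addr(&fle[1], iova1);
++	dpaa2_fl_set_len(&fle[1], len1);
++	dpaa2_fl_set_format(&fle[1], dpaa2_fl_single);
++	dpaa2_fl_set_final(&fle[1], true);	/* last entry in the list */
++}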
++
++/**
++ * struct dpaa2_dq - the qman result structure
++ * @dont_manipulate_directly: the 16 32-bit words that can hold any
++ * possible qman dequeue result.
++ *
++ * When frames are dequeued, the FDs show up inside "dequeue" result structures
++ * (if at all, not all dequeue results contain valid FDs). This structure type
++ * is intentionally defined without internal detail, and the only reason it
++ * isn't declared opaquely (without size) is to allow the user to provide
++ * suitably-sized (and aligned) memory for these entries.
++ */
++struct dpaa2_dq {
++ uint32_t dont_manipulate_directly[16];
++};
++
++/* Parsing frame dequeue results */
++/* FQ empty */
++#define DPAA2_DQ_STAT_FQEMPTY 0x80
++/* FQ held active */
++#define DPAA2_DQ_STAT_HELDACTIVE 0x40
++/* FQ force eligible */
++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
++/* Valid frame */
++#define DPAA2_DQ_STAT_VALIDFRAME 0x10
++/* FQ ODP enable */
++#define DPAA2_DQ_STAT_ODPVALID 0x04
++/* Volatile dequeue */
++#define DPAA2_DQ_STAT_VOLATILE 0x02
++/* volatile dequeue command is expired */
++#define DPAA2_DQ_STAT_EXPIRED 0x01
++
++/**
++ * dpaa2_dq_flags() - Get the stat field of dequeue response
++ * @dq: the dequeue result.
++ */
++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
++ * command.
++ * @dq: the dequeue result.
++ *
++ * Return 1 for a volatile (pull) dequeue, 0 for a static dequeue.
++ */
++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
++{
++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
++}
++
++/**
++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
++ * @dq: the dequeue result.
++ *
++ * Return boolean.
++ */
++static inline int dpaa2_dq_is_pull_complete(
++ const struct dpaa2_dq *dq)
++{
++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
++}
++
++/**
++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ * @dq: the dequeue result.
++ *
++ * Return seqnum.
++ */
++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
++ * odpid is valid only if ODPVALID flag is TRUE.
++ * @dq: the dequeue result.
++ *
++ * Return odpid.
++ */
++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return fqid.
++ */
++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the byte count remaining in the FQ.
++ */
++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame count remaining in the FQ.
++ */
++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame queue context.
++ */
++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq);
++
++/**
++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame descriptor.
++ */
++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq);
++
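++/*
++ * Illustrative sketch, not part of the original API: a dequeue result only
++ * carries a frame when the VALIDFRAME status bit is set, so a caller could
++ * guard dpaa2_dq_fd() like this.
++ */
++static inline const struct dpaa2_fd *dpaa2_dq_example_fd(
++					const struct dpaa2_dq *dq)
++{
++	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
++		return NULL;	/* e.g. an empty-FQ result */
++	return dpaa2_dq_fd(dq);
++}
++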
++#endif /* __FSL_DPAA2_FD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h
+@@ -0,0 +1,619 @@
++/* Copyright 2014 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAA2_IO_H
++#define __FSL_DPAA2_IO_H
++
++#include "fsl_dpaa2_fd.h"
++
++struct dpaa2_io;
++struct dpaa2_io_store;
++
++/**
++ * DOC: DPIO Service Management
++ *
++ * The DPIO service provides APIs for users to interact with the datapath
++ * by enqueueing and dequeuing frame descriptors.
++ *
++ * The following set of APIs can be used to enqueue and dequeue frames
++ * as well as producing notification callbacks when data is available
++ * for dequeue.
++ */
++
++/**
++ * struct dpaa2_io_desc - The DPIO descriptor.
++ * @receives_notifications: Use notification mode.
++ * @has_irq: use irq-based processing.
++ * @will_poll: use poll processing.
++ * @has_8prio: set for channel with 8 priority WQs.
++ * @cpu: the cpu index that at least interrupt handlers will execute on.
++ * @stash_affinity: non-zero if stash transactions for this portal favour 'cpu'.
++ * @regs_cena: the cache enabled regs.
++ * @regs_cinh: the cache inhibited regs.
++ * @dpio_id: The dpio index.
++ * @qman_version: the qman version
++ *
++ * Describe the attributes and features of the DPIO object.
++ */
++struct dpaa2_io_desc {
++ /* non-zero iff the DPIO has a channel */
++ int receives_notifications;
++ /* non-zero if the DPIO portal interrupt is handled. If so, the
++ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). */
++ int has_irq;
++	/* non-zero if the caller/OS is prepared to call the
++	 * dpaa2_io_service_poll() routine as part of its run-to-completion (or
++	 * scheduling) loop. If so, the DPIO service may dynamically switch some
++	 * of its processing between polling-based and irq-based. It is an
++	 * illegal combination to have (!has_irq && !will_poll). */
++ int will_poll;
++ /* ignored unless 'receives_notifications'. Non-zero iff the channel has
++ * 8 priority WQs, otherwise the channel has 2. */
++ int has_8prio;
++ /* the cpu index that at least interrupt handlers will execute on. And
++ * if 'stash_affinity' is non-zero, the cache targeted by stash
++ * transactions is affine to this cpu. */
++ int cpu;
++ /* non-zero if stash transactions for this portal favour 'cpu' over
++ * other CPUs. (Eg. zero if there's no stashing, or stashing is to
++ * shared cache.) */
++ int stash_affinity;
++ /* Caller-provided flags, determined by bus-scanning and/or creation of
++ * DPIO objects via MC commands. */
++ void *regs_cena;
++ void *regs_cinh;
++ int dpio_id;
++ uint32_t qman_version;
++};
++
++/**
++ * dpaa2_io_create() - create a dpaa2_io object.
++ * @desc: the dpaa2_io descriptor
++ *
++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
++ * DPIO object. This handle can be used on its own (like a one-portal "DPIO
++ * service") or later be added to a service-type "struct dpaa2_io" object. Note,
++ * the information required on 'desc' is copied so the caller is free to do as
++ * they wish with the input parameter upon return.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++
++/**
++ * dpaa2_io_create_service() - Create an (initially empty) DPIO service.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_create_service(void);
++
++/**
++ * dpaa2_io_default_service() - Use the driver's own global (and initially
++ * empty) DPIO service.
++ *
++ * This increments the reference count, so don't forget to use dpaa2_io_down()
++ * for each time this function is called.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_default_service(void);
++
++/**
++ * dpaa2_io_down() - release the dpaa2_io object.
++ * @d: the dpaa2_io object to be released.
++ *
++ * The "struct dpaa2_io" type can represent an individual DPIO object (as
++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
++ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
++ * each handle obtained should be released using this function.
++ */
++void dpaa2_io_down(struct dpaa2_io *d);
++
++/**
++ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service.
++ * @service: the given DPIO service.
++ * @obj: the given DPIO object.
++ *
++ * 'service' must have been created by dpaa2_io_create_service() and 'obj'
++ * must have been created by dpaa2_io_create(). This increments the reference
++ * count on the object that 'obj' refers to, so the user could call
++ * dpaa2_io_down(obj) after this and the object will persist within the service
++ * (and will be destroyed when the service is destroyed).
++ *
++ * Return 0 for success, or -EINVAL for failure.
++ */
++int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj);
++
++/**
++ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object.
++ * @obj: the given DPIO object.
++ * @desc: the returned DPIO descriptor.
++ *
++ * This function will return failure if the given dpaa2_io struct represents a
++ * service rather than an individual DPIO object, otherwise it returns zero and
++ * the given 'desc' structure is filled in.
++ *
++ * Return 0 for success, or -EINVAL for failure.
++ */
++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc);
++
++/**
++ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that
++ * are polling-driven.
++ * @obj: the given DPIO object.
++ *
++ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero.
++ *
++ * Return 0 for success, or -EINVAL for failure.
++ */
++int dpaa2_io_poll(struct dpaa2_io *obj);
++
++/**
++ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are
++ * irq-driven.
++ * @obj: the given DPIO object.
++ *
++ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero.
++ *
++ * Return IRQ_HANDLED for success, or -EINVAL for failure.
++ */
++int dpaa2_io_irq(struct dpaa2_io *obj);
++
++/**
++ * dpaa2_io_pause_poll() - Used to stop polling.
++ * @obj: the given DPIO object.
++ *
++ * If a polling application is going to stop polling for a period of time and
++ * supports interrupt processing, it can call this function to convert all
++ * processing to IRQ. (Eg. when sleeping.)
++ *
++ * Return -EINVAL.
++ */
++int dpaa2_io_pause_poll(struct dpaa2_io *obj);
++
++/**
++ * dpaa2_io_resume_poll() - Resume polling
++ * @obj: the given DPIO object.
++ *
++ * Return -EINVAL.
++ */
++int dpaa2_io_resume_poll(struct dpaa2_io *obj);
++
++/**
++ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service
++ * can receive notifications on.
++ * @s: the given DPIO object.
++ * @mask: the mask of cpus.
++ *
++ * Note that this is a run-time snapshot. If things like cpu-hotplug are
++ * supported in the target system, then an attempt to register notifications
++ * for a cpu that appears present in the given mask might fail if that cpu has
++ * gone offline in the mean time.
++ */
++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask);
++
++/**
++ * dpaa2_io_service_stashing() - Get a mask of cpus that the DPIO service has
++ * stash affinity to.
++ * @s: the given DPIO object.
++ * @mask: the mask of cpus.
++ */
++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask);
++
++/**
++ * dpaa2_io_service_has_nonaffine() - Check the DPIO service's cpu affinity
++ * for stashing.
++ * @s: the given DPIO object.
++ *
++ * Return a boolean, whether or not the DPIO service has resources that have no
++ * particular cpu affinity for stashing. (Useful to know if you wish to operate
++ * on CPUs that the service has no affinity to: you would then choose to use
++ * resources that are neutral, rather than affine to a different CPU.) Unlike
++ * other service-specific APIs, this one doesn't return an error if it is passed
++ * a non-service object. So don't do it.
++ */
++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s);
++
++/*************************/
++/* Notification handling */
++/*************************/
++
++/**
++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure.
++ * @cb: the callback to be invoked when the notification arrives.
++ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN.
++ * @id: FQID or channel ID, needed for rearm.
++ * @desired_cpu: the cpu on which the notifications will show up.
++ * @actual_cpu: the cpu on which the notification actually shows up.
++ * @migration_cb: callback function used for migration.
++ * @dpio_id: the dpio index.
++ * @qman64: the 64-bit context value shows up in the FQDAN/CDAN.
++ * @node: the list node.
++ * @dpio_private: the dpio object internal to dpio_service.
++ *
++ * When a FQDAN/CDAN registration is made (eg. by DPNI/DPCON/DPAI code), a
++ * context of the following type is used. The caller can embed it within a
++ * larger structure in order to add state that is tracked along with the
++ * notification (this may be useful when callbacks are invoked that pass this
++ * notification context as a parameter).
++ */
++struct dpaa2_io_notification_ctx {
++ void (*cb)(struct dpaa2_io_notification_ctx *);
++ int is_cdan;
++ uint32_t id;
++ /* This specifies which cpu the user wants notifications to show up on
++ * (ie. to execute 'cb'). If notification-handling on that cpu is not
++ * available at the time of notification registration, the registration
++ * will fail. */
++ int desired_cpu;
++ /* If the target platform supports cpu-hotplug or other features
++ * (related to power-management, one would expect) that can migrate IRQ
++ * handling of a given DPIO object, then this value will potentially be
++ * different to 'desired_cpu' at run-time. */
++ int actual_cpu;
++ /* And if migration does occur and this callback is non-NULL, it will
++	 * be invoked prior to any further notification callbacks executing on
++ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the
++ * migration, and 'newcpu' is what it is now. Both could conceivably be
++ * different to 'desired_cpu'. */
++ void (*migration_cb)(struct dpaa2_io_notification_ctx *,
++ int oldcpu, int newcpu);
++ /* These are returned from dpaa2_io_service_register().
++ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that
++ * has been selected by the service for receiving the notifications. The
++ * caller can use this value in the MC command that attaches the FQ (or
++ * channel) of their DPNI (or DPCON, respectively) to this DPIO for
++ * notification-generation.
++ * 'qman64' is the 64-bit context value that needs to be sent in the
++ * same MC command in order to be programmed into the FQ or channel -
++ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to
++ * the DPIO object, and the DPIO service specifies this value back to
++ * the caller so that the notifications that show up will be
++	 * comprehensible/demux-able to the DPIO service. */
++ int dpio_id;
++ uint64_t qman64;
++ /* These fields are internal to the DPIO service once the context is
++ * registered. TBD: may require more internal state fields. */
++ struct list_head node;
++ void *dpio_private;
++};
++
++/**
++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
++ * notifications on the given DPIO service.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a
++ * DPIO object is performed after this function is called. In that way, (a) the
++ * DPIO service is "ready" to handle a notification arrival (which might happen
++ * before the "attach" command to MC has returned control of execution back to
++ * the caller), and (b) the DPIO service can provide back to the caller the
++ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command
++ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the
++ * right notification fields to the DPIO service.
++ *
++ * Return 0 for success, or -ENODEV for failure.
++ */
++int dpaa2_io_service_register(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
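++
++/*
++ * Illustrative sketch, not part of the original API: registering a FQDAN
++ * callback against the default service. The use of -1 as "no cpu
++ * preference" for desired_cpu is an assumption of this example, not
++ * documented behaviour.
++ */
++static inline int dpaa2_io_example_register(uint32_t fqid,
++			void (*cb)(struct dpaa2_io_notification_ctx *),
++			struct dpaa2_io_notification_ctx *ctx)
++{
++	struct dpaa2_io *d = dpaa2_io_default_service();
++	int err;
++
++	if (!d)
++		return -ENODEV;
++	ctx->cb = cb;
++	ctx->is_cdan = 0;		/* FQDAN, not CDAN */
++	ctx->id = fqid;
++	ctx->desired_cpu = -1;		/* assumed: no cpu preference */
++	ctx->migration_cb = NULL;
++	err = dpaa2_io_service_register(d, ctx);
++	if (err)
++		dpaa2_io_down(d);
++	/* on success, ctx->dpio_id and ctx->qman64 feed the MC attach cmd */
++	return err;
++}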
++
++/**
++ * dpaa2_io_service_deregister - The opposite of 'register'.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * Note that 'register' should be called *before*
++ * making the MC call to attach the notification-producing device to the
++ * notification-handling DPIO service; the 'unregister' function should be
++ * called *after* making the MC call to detach the notification-producing
++ * device.
++ *
++ * Return 0 for success.
++ */
++int dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++
++/**
++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
++ * considered "disarmed". Ie. the user can issue pull dequeue operations on that
++ * traffic source for as long as it likes. Eventually it may wish to "rearm"
++ * that source to allow it to produce another FQDAN/CDAN; that's what this
++ * function achieves.
++ *
++ * Return 0 for success, -ENODEV if no service is available, or -EBUSY/-EIO
++ * if the notification could not be rearmed when setting the CDAN or
++ * scheduling the FQ.
++ */
++int dpaa2_io_service_rearm(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++
++/**
++ * dpaa2_io_from_registration() - Get the DPIO object from the given notification
++ * context.
++ * @ctx: the given notification context.
++ * @ret: the returned DPIO object.
++ *
++ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the
++ * returned handle is not selected based on a 'cpu' argument, but is the same
++ * DPIO object that the given notification context is registered against. The
++ * returned handle carries a reference count, so a corresponding dpaa2_io_down()
++ * would be required when the reference is no longer needed.
++ *
++ * Return 0 for success, or -EINVAL for failure.
++ */
++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx,
++ struct dpaa2_io **ret);
++
++/**********************************/
++/* General usage of DPIO services */
++/**********************************/
++
++/**
++ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given
++ * notification context and cpu.
++ * @service: the DPIO service.
++ * @cpu: the cpu that the DPIO resource has stashing affinity to.
++ * @ret: the returned DPIO resource.
++ *
++ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers
++ * to an individual DPIO object or to a whole service. In the latter case, an
++ * internal choice is made for each operation. This function supports the former
++ * case, by selecting an individual DPIO object *from* the service in order for
++ * it to be used multiple times to provide "persistence". The returned handle
++ * also carries a reference count, so a corresponding dpaa2_io_down() would be
++ * required when the reference is no longer needed. Note, a parameter of -1 for
++ * 'cpu' will select a DPIO resource that has no particular stashing affinity to
++ * any cpu (eg. one that stashes to platform cache).
++ *
++ * Return 0 for success, or -ENODEV for failure.
++ */
++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu,
++ struct dpaa2_io **ret);
++
++/*****************/
++/* Pull dequeues */
++/*****************/
++
++/**
++ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * To support DCA/order-preservation, it will be necessary to support an
++ * alternative form, because they must ultimately dequeue to DQRR rather than a
++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will
++ * "complete" using a caller-provided callback (from DQRR processing) rather
++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg.
++ * the alternative form will likely take a callback parameter rather than a
++ * store parameter. Ignoring it for now to keep the picture clearer.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid,
++ struct dpaa2_io_store *s);
++
++/**
++ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
++ * @d: the given DPIO service.
++ * @channelid: the given channel id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * To support DCA/order-preservation, it will be necessary to support an
++ * alternative form, because they must ultimately dequeue to DQRR rather than a
++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will
++ * "complete" using a caller-provided callback (from DQRR processing) rather
++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg.
++ * the alternative form will likely take a callback parameter rather than a
++ * store parameter. Ignoring it for now to keep the picture clearer.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid,
++ struct dpaa2_io_store *s);
++
++/************/
++/* Enqueues */
++/************/
++
++/**
++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * This definition bypasses some features that are not expected to be priority-1
++ * features, and may not be needed at all via current assumptions (QBMan's
++ * feature set is wider than the MC object model is intending to support,
++ * initially at least). Plus, keeping them out (for now) keeps the API view
++ * simpler. Missing features are:
++ * - enqueue confirmation (results DMA'd back to the user)
++ * - ORP
++ * - DCA/order-preservation (see note in "pull dequeues")
++ * - enqueue consumption interrupts
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
++ uint32_t fqid,
++ const struct dpaa2_fd *fd);
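++
++/*
++ * Illustrative sketch, not part of the original API: since -EBUSY only
++ * means the enqueue ring is momentarily full, a caller might retry a
++ * bounded number of times. The retry limit is an arbitrary assumption.
++ */
++static inline int dpaa2_io_example_enqueue(struct dpaa2_io *d, uint32_t fqid,
++					   const struct dpaa2_fd *fd)
++{
++	int i, err;
++
++	for (i = 0; i < 1000; i++) {
++		err = dpaa2_io_service_enqueue_fq(d, fqid, fd);
++		if (err != -EBUSY)
++			return err;	/* 0 on success, other errors fatal */
++	}
++	return -EBUSY;
++}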
++
++/**
++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
++ * @d: the given DPIO service.
++ * @qdid: the given queuing destination id.
++ * @prio: the given queuing priority.
++ * @qdbin: the given queuing destination bin.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * This definition bypasses some features that are not expected to be priority-1
++ * features, and may not be needed at all via current assumptions (QBMan's
++ * feature set is wider than the MC object model is intending to support,
++ * initially at least). Plus, keeping them out (for now) keeps the API view
++ * simpler. Missing features are:
++ * - enqueue confirmation (results DMA'd back to the user)
++ * - ORP
++ * - DCA/order-preservation (see note in "pull dequeues")
++ * - enqueue consumption interrupts
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
++ uint32_t qdid, uint8_t prio, uint16_t qdbin,
++ const struct dpaa2_fd *fd);
++
++/*******************/
++/* Buffer handling */
++/*******************/
++
++/**
++ * dpaa2_io_service_release() - Release buffers to a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffers to be released.
++ * @num_buffers: the number of the buffers to be released.
++ *
++ * Return 0 for success, and negative error code for failure.
++ */
++int dpaa2_io_service_release(struct dpaa2_io *d,
++ uint32_t bpid,
++ const uint64_t *buffers,
++ unsigned int num_buffers);
++
++/**
++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffer addresses for acquired buffers.
++ * @num_buffers: the expected number of the buffers to acquire.
++ *
++ * Return a negative error code if the command failed, otherwise it returns
++ * the number of buffers acquired, which may be less than the number requested.
++ * Eg. if the buffer pool is empty, this will return zero.
++ */
++int dpaa2_io_service_acquire(struct dpaa2_io *d,
++ uint32_t bpid,
++ uint64_t *buffers,
++ unsigned int num_buffers);
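++
++/*
++ * Illustrative sketch, not part of the original API: because acquire
++ * returns the number of buffers actually obtained (possibly fewer than
++ * requested, zero once the pool is empty), draining a pool is a loop.
++ * The batch size of 7 is an arbitrary assumption.
++ */
++static inline int dpaa2_io_example_drain_pool(struct dpaa2_io *d,
++					      uint32_t bpid)
++{
++	uint64_t bufs[7];
++	int n, total = 0;
++
++	do {
++		n = dpaa2_io_service_acquire(d, bpid, bufs, 7);
++		if (n < 0)
++			return n;	/* command failure */
++		total += n;
++		/* a real caller would unmap/free the 'n' addresses here */
++	} while (n);
++	return total;
++}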
++
++/***************/
++/* DPIO stores */
++/***************/
++
++/* These are reusable memory blocks for retrieving dequeue results into, and to
++ * assist with parsing those results once they show up. They also hide the
++ * details of how to use "tokens" to make detection of DMA results possible (ie.
++ * comparing memory before the DMA and after it) while minimising the needless
++ * clearing/rewriting of those memory locations between uses.
++ */
++
++/**
++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue
++ * result.
++ * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
++ * @dev: the device to allow mapping/unmapping the DMAable region.
++ *
++ * Constructor - max_frames must be <= 16. The user provides the
++ * device struct to allow mapping/unmapping of the DMAable region. Area for
++ * storage will be allocated during create. The size of this storage is
++ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a
++ * wrapper structure allocated within the DPIO code, which owns and manages
++ * the allocated store.
++ *
++ * Return a dpaa2_io_store struct for successfully created storage memory, or
++ * NULL if the storage for dequeue results could not be allocated.
++ */
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev);
++
++/**
++ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue
++ * result.
++ * @s: the storage memory to be destroyed.
++ *
++ * Frees the specified storage memory.
++ */
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
++
++/**
++ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
++ * @s: the dpaa2_io_store object.
++ * @is_last: indicate whether this is the last frame in the pull command.
++ *
++ * Once dpaa2_io_store has been passed to a function that performs dequeues to
++ * it, like dpaa2_ni_rx(), this function can be used to determine when the next
++ * frame result is available. Once this function returns non-NULL, a subsequent
++ * call to it will try to find the *next* dequeue result.
++ *
++ * Note that if a pull-dequeue has a null result because the target FQ/channel
++ * was empty, then this function will return NULL rather than expecting the
++ * caller to always check for this case. As such, "is_last" can be used to
++ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ *
++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
++ */
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
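++
++/*
++ * Illustrative sketch, not part of the original API: consuming one pull
++ * dequeue through a store. Spinning on dpaa2_io_store_next() while the
++ * DMA completes is only acceptable in example/test code.
++ */
++static inline int dpaa2_io_example_pull(struct dpaa2_io *d, uint32_t fqid,
++					struct dpaa2_io_store *s)
++{
++	struct dpaa2_dq *dq;
++	int is_last = 0, nframes = 0;
++	int err;
++
++	err = dpaa2_io_service_pull_fq(d, fqid, s);
++	if (err)
++		return err;
++	while (!is_last) {
++		/* NULL here means "still waiting" or "empty dequeue" */
++		dq = dpaa2_io_store_next(s, &is_last);
++		if (dq && (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
++			nframes++;	/* process dpaa2_dq_fd(dq) here */
++	}
++	return nframes;
++}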
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++/**
++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
++ * @d: the given DPIO object.
++ * @fqid: the id of frame queue to be queried.
++ * @fcnt: the queried frame count.
++ * @bcnt: the queried byte count.
++ *
++ * Knowing the FQ count at run-time can be useful in debugging situations.
++ * The instantaneous frame- and byte-count are hereby returned.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++ uint32_t *fcnt, uint32_t *bcnt);
++
++/**
++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
++ * buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the index of buffer pool to be queried.
++ * @num: the queried number of buffers in the buffer pool.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
++ uint32_t *num);
++#endif
++#endif /* __FSL_DPAA2_IO_H */
diff --git a/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch b/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch
new file mode 100644
index 0000000..dd5eb7e
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7199-dpaa2-dpio-Cosmetic-cleanup.patch
@@ -0,0 +1,35 @@
+From a4150e8ec8da3add3933dd026c7154dcca2ee2e7 Mon Sep 17 00:00:00 2001
+From: Mihai Caraman <mihai.caraman at freescale.com>
+Date: Tue, 5 Apr 2016 14:47:57 +0000
+Subject: [PATCH 199/226] dpaa2-dpio: Cosmetic cleanup
+
+Replace obsolete terms.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 2 +-
+ drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h
+@@ -51,7 +51,7 @@ struct qbman_block_desc {
+ * Descriptor for a QBMan software portal, expressed in terms that make sense to
+ * the user context. Ie. on MC, this information is likely to be true-physical,
+ * and instantiated statically at compile-time. On GPP, this information is
+- * likely to be obtained via "discovery" over a partition's "layerscape bus"
++ * likely to be obtained via "discovery" over a partition's "MC bus"
+ * (ie. in response to a MC portal command), and would take into account any
+ * virtualisation of the GPP user's address space and/or interrupt numbering.
+ */
+--- a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h
+@@ -138,7 +138,7 @@ static inline void *qbman_swp_mc_complet
+ /* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
+ * is either serving as a configuration command or a query result. The
+ * representation is inherently little-endian, as the indexing of the words is
+- * itself little-endian in nature and layerscape is little endian for anything
++ * itself little-endian in nature and DPAA2 is little endian for anything
+ * that crosses a word boundary too (64-bit fields are the obvious examples).
+ */
+ struct qb_attr_code {
diff --git a/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch
new file mode 100644
index 0000000..91ff06a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7200-staging-fsl-mc-dpio-driver-match-id-cleanup.patch
@@ -0,0 +1,26 @@
+From 3cc23880ecb98efe2d868254201ac58f945d9e1d Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 15 Jun 2016 14:05:08 -0500
+Subject: [PATCH 200/226] staging: fsl-mc: dpio driver match id cleanup
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c
+@@ -364,12 +364,10 @@ err_mcportal:
+ return err;
+ }
+
+-static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = {
++static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpio",
+- .ver_major = DPIO_VER_MAJOR,
+- .ver_minor = DPIO_VER_MINOR
+ },
+ { .vendor = 0x0 }
+ };
diff --git a/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch b/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch
new file mode 100644
index 0000000..cbec144
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7201-staging-dpaa2-eth-initial-commit-of-dpaa2-eth-driver.patch
@@ -0,0 +1,12268 @@
+From e588172442093fe22374dc1bfc88a7da751d6b30 Mon Sep 17 00:00:00 2001
+From: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Date: Tue, 15 Sep 2015 10:14:16 -0500
+Subject: [PATCH 201/226] staging: dpaa2-eth: initial commit of dpaa2-eth
+ driver
+
+commit 3106ece5d96784b63a4eabb26661baaefedd164f
+[context adjustment]
+
+This is a commit of a squash of the cumulative dpaa2-eth patches
+in the sdk 2.0 kernel as of 3/7/2016.
+
+flib,dpaa2-eth: flib header update (Rebasing onto kernel 3.19, MC 0.6)
+
+this patch was moved from 4.0 branch
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+[Stuart: split into multiple patches]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+Integrated-by: Jilong Guo <jilong.guo at nxp.com>
+
+flib,dpaa2-eth: updated Eth (was: Rebasing onto kernel 3.19, MC 0.6)
+
+updated Ethernet driver from 4.0 branch
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+[Stuart: cherry-picked patch from 4.0 and split it up]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+Conflicts:
+
+ drivers/staging/Makefile
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+dpaa2-eth: Adjust 'options' size
+
+The 'options' field of various MC configuration structures has changed
+from u64 to u32 as of MC firmware version 7.0.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I9ba0c19fc22f745e6be6cc40862afa18fa3ac3db
+Reviewed-on: http://git.am.freescale.net:8181/35579
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Selectively disable preemption
+
+Temporary workaround for an MC Bus API quirk which only allows us to
+specify either a spinlock-protected MC Portal or a mutex-protected
+one, but then tries to match the runtime context in order to enforce
+their usage.
+
+To Be Reverted.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ida2ec1fdbdebfd2e427f96ddad7582880146fda9
+Reviewed-on: http://git.am.freescale.net:8181/35580
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Fix ethtool bug
+
+We were writing beyond the end of the allocated data area for ethtool
+statistics.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: I6b77498a78dad06970508ebbed7144be73854f7f
+Reviewed-on: http://git.am.freescale.net:8181/35583
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Retry read if store unexpectedly empty
+
+After we place a volatile dequeue command, we might inspect the
+store before the DMA has actually completed. In such cases, we must
+retry, lest the store be overwritten by the next legitimate
+volatile dequeue.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I314fbb8b4d9f589715e42d35fc6677d726b8f5ba
+Reviewed-on: http://git.am.freescale.net:8181/35584
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
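+In outline, the consumer loop now tolerates a transiently empty store
+(a minimal sketch; store_next() is a stand-in for the DPIO store
+accessor, not the driver's actual symbol):
+
+	/* The volatile dequeue may still be in flight: a NULL entry
+	 * before the "last" marker means the DMA has not landed yet,
+	 * so poll again instead of treating the store as drained.
+	 */
+	do {
+		dq = store_next(store, &is_last);
+	} while (!is_last && !dq);
+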
+flib: Fix "missing braces around initializer" warning
+
+GCC does not (yet?) support the ={0} initializer in the case of an array
+of structs. Fix the Flib in order to make the warning go away.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I8782ecb714c032cfeeecf4c8323cf9dbb702b10f
+Reviewed-on: http://git.am.freescale.net:8181/35586
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+Revert "dpaa2-eth: Selectively disable preemption"
+
+This reverts commit e1455823c33b8dd48b5d2d50a7e8a11d3934cc0d.
+
+dpaa2-eth: Fix memory leak
+
+A buffer kmalloc'ed at probe time was not freed after it was no
+longer needed.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: Iba197209e9203ed306449729c6dcd23ec95f094d
+Reviewed-on: http://git.am.freescale.net:8181/35756
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Remove unused field in ldpaa_eth_priv structure
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: I124c3e4589b6420b1ea5cc05a03a51ea938b2bea
+Reviewed-on: http://git.am.freescale.net:8181/35757
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Fix "NOHZ: local_softirq_pending" warning
+
+Explicitly run softirqs after we enable NAPI. In particular, this rids
+us of the "NOHZ: local_softirq_pending" warnings, but it also solves a
+couple of other problems, among them fluctuating performance and high
+ping latencies.
+
+Notes:
+ - This will prevent us from timely processing notifications and
+other "non-frame events" coming into the software portal. So far,
+though, we only expect Dequeue Available Notifications, so this patch
+is good enough for now.
+ - A degradation in console responsiveness is expected, especially in
+cases where the bottom-half runs on the same CPU as the console.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: Ia6f11da433024e80ee59e821c9eabfa5068df5e5
+Reviewed-on: http://git.am.freescale.net:8181/35830
+Reviewed-by: Alexandru Marginean <Alexandru.Marginean at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
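+One way to force the pending softirqs to run right away, sketched under
+the assumption that NAPI instances hang off per-channel structures:
+
+	/* Any softirq raised while bottom halves are masked (e.g. by
+	 * an early notification scheduling NAPI) runs as soon as
+	 * local_bh_enable() is called, instead of being left for the
+	 * NOHZ idle path to warn about.
+	 */
+	local_bh_disable();
+	for (i = 0; i < priv->num_channels; i++)
+		napi_enable(&priv->channel[i]->napi);
+	local_bh_enable();
+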
+dpaa2-eth: Add polling mode for link state changes
+
+Add the Kconfigurable option of using a thread for polling on
+the link state instead of relying on interrupts from the MC.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: If2fe66fc5c0fbee2568d7afa15d43ea33f92e8e2
+Reviewed-on: http://git.am.freescale.net:8181/35967
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Update copyright years.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I7e00eecfc5569027c908124726edaf06be357c02
+Reviewed-on: http://git.am.freescale.net:8181/37666
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Drain bpools when netdev is down
+
+In a data path layout with potentially a dozen interfaces, not all of
+them may be up at the same time, yet they may consume a fair amount of
+buffer space.
+Drain the buffer pool upon ifdown and re-seed it at ifup.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I24a379b643c8b5161a33b966c3314cf91024ed4a
+Reviewed-on: http://git.am.freescale.net:8181/37667
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Interrupts cleanup
+
+Add the code for cleaning up interrupts on driver removal.
+This was lost during transition from kernel 3.16 to 3.19.
+
+Also, there's no need to call devm_free_irq() if probe fails
+as the kernel will release all driver resources.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: Ifd404bbf399d5ba62e2896371076719c1d6b4214
+Reviewed-on: http://git.am.freescale.net:8181/36199
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Bharat Bhushan <Bharat.Bhushan at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Reviewed-on: http://git.am.freescale.net:8181/37690
+
+dpaa2-eth: Ethtool support for hashing
+
+Only one set of header fields is supported for all protocols; the driver
+silently replaces the previous configuration regardless of the
+user-selected protocol.
+The following fields are supported:
+ L2DA
+ VLAN tag
+ L3 proto
+ IP SA
+ IP DA
+ L4 bytes 0 & 1 [TCP/UDP src port]
+ L4 bytes 2 & 3 [TCP/UDP dst port]
+
+Signed-off-by: Alex Marginean <alexandru.marginean at freescale.com>
+
+Change-Id: I97c9dac1b842fe6bc7115e40c08c42f67dee8c9c
+Reviewed-on: http://git.am.freescale.net:8181/37260
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Fix maximum number of FQs
+
+The maximum number of Rx/Tx conf FQs associated with a DPNI was not
+updated when the implementation changed. It just happened to work
+by accident.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I03e30e0121a40d0d15fcdc4bee1fb98caa17c0ef
+Reviewed-on: http://git.am.freescale.net:8181/37668
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Fix Rx buffer address alignment
+
+We need to align the start address of the Rx buffers to
+LDPAA_ETH_BUF_ALIGN bytes. We were using SMP_CACHE_BYTES instead.
+It happened to work because both defines have the value of 64,
+but this may change at some point.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I08a0f3f18f82c5581c491bd395e3ad066b25bcf5
+Reviewed-on: http://git.am.freescale.net:8181/37669
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
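+A sketch of the intended alignment, with the buffer-size macro
+illustrative and LDPAA_ETH_BUF_ALIGN taken from the text above:
+
+	/* Over-allocate, then align explicitly to the DPAA2 requirement;
+	 * do not rely on SMP_CACHE_BYTES happening to equal 64.
+	 */
+	buf = napi_alloc_frag(LDPAA_ETH_RX_BUF_SIZE + LDPAA_ETH_BUF_ALIGN);
+	if (unlikely(!buf))
+		goto err_alloc;
+	buf = PTR_ALIGN(buf, LDPAA_ETH_BUF_ALIGN);
+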
+dpaa2-eth: Add buffer count to ethtool statistics
+
+Print the number of buffers available in the pool for a certain DPNI
+along with the rest of the ethtool -S stats.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ia1f5cf341c8414ae2058a73f6bc81490ef134592
+Reviewed-on: http://git.am.freescale.net:8181/37671
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Add Rx error queue
+
+Add a Kconfigurable option that allows Rx error frames to be
+enqueued on an error FQ. By default error frames are discarded,
+but for debug purposes we may want to process them at driver
+level.
+
+Note: Checkpatch issues a false positive about complex macros that
+should be parenthesized.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I7d19d00b5d5445514ebd112c886ce8ccdbb1f0da
+Reviewed-on: http://git.am.freescale.net:8181/37672
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-dpaa2: FLib headers cleanup
+
+Going with the flow of moving fsl-dpaa2 headers into the drivers'
+location rather than keeping them all in one place.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ia2870cd019a4934c7835d38752a46b2a0045f30e
+Reviewed-on: http://git.am.freescale.net:8181/37674
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Klocwork fixes
+
+Fix several issues reported by Klocwork.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I1e23365765f3b0ff9b6474d8207df7c1f2433ccd
+Reviewed-on: http://git.am.freescale.net:8181/37675
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Probe devices with no hash support
+
+Don't fail at probe if the DPNI doesn't have the hash distribution
+option enabled. Instead, initialize a single Rx frame queue and
+use it for all incoming traffic.
+
+Rx flow hashing configuration through ethtool will not work
+in this case.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Iaf17e05b15946e6901c39a21b5344b89e9f1d797
+Reviewed-on: http://git.am.freescale.net:8181/37676
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Process frames in IRQ context
+
+Stop using threaded IRQs and move back to hardirq top-halves.
+This is the first patch of a small series adapting the DPIO and Ethernet
+code to these changes.
+
+Signed-off-by: Roy Pledge <roy.pledge at freescale.com>
+Tested-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Tested-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+[Stuart: split dpio and eth into separate patches, updated subject]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Fix bug in NAPI poll
+
+We incorrectly rearmed FQDAN notifications at the end of a NAPI cycle,
+regardless of whether the NAPI budget was consumed or not. We only need
+to rearm notifications if the NAPI cycle cleaned fewer frames than its
+budget; otherwise a new NAPI poll will be scheduled anyway.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ib55497bdbd769047420b3150668f2e2aef3c93f6
+Reviewed-on: http://git.am.freescale.net:8181/38317
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
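+The corrected poll end, as a minimal sketch (consume_frames() and the
+rearm helper are illustrative names, not the driver's exact symbols):
+
+	cleaned = consume_frames(ch, budget);
+	if (cleaned < budget) {
+		napi_complete(napi);
+		/* Rearm only when the budget was not exhausted; a full
+		 * budget means the core will re-poll us anyway.
+		 */
+		rearm_fqdan(ch);
+	}
+	return cleaned;
+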
+dpaa2-eth: Use dma_map_sg on Tx
+
+Use the simpler dma_map_sg() along with the scatterlist API if the
+egress frame is scatter-gather, at the cost of keeping some extra
+information in the frame's software annotation area.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: If293aeabbd58d031f21456704357d4ff7e53c559
+Reviewed-on: http://git.am.freescale.net:8181/37681
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
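+A rough shape of the S/G egress mapping, assuming the driver caps the
+number of entries (error paths elided; MAX_SG_ENTRIES is illustrative):
+
+	struct scatterlist scl[MAX_SG_ENTRIES];
+	int num_sg, num_dma;
+
+	sg_init_table(scl, MAX_SG_ENTRIES);
+	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+	num_dma = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+	/* scl and num_dma go into the software annotation area so that
+	 * TxConf can later dma_unmap_sg() the same list.
+	 */
+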
+dpaa2-eth: Reduce retries if Tx portal busy
+
+Too many retries due to Tx portal contention led to significant cycle
+waste and a reduction in performance.
+Reduce the number of enqueue retries and drop the frame if the enqueue
+is still unsuccessful.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ib111ec61cd4294a7632348c25fa3d7f4002be0c0
+Reviewed-on: http://git.am.freescale.net:8181/37682
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Add sysfs support for TxConf affinity change
+
+This adds support in sysfs for affining Tx Confirmation queues to GPPs,
+via the affine DPIO objects.
+
+The user can specify a cpu list in /sys/class/net/ni<X>/txconf_affinity
+to which the Ethernet driver will affine the TxConf FQs, in round-robin
+fashion. This is naturally a bit coarse, because there is no "official"
+mapping of the transmitting CPUs to Tx Confirmation queues.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I4b3da632e202ceeb22986c842d746aafe2a87a81
+Reviewed-on: http://git.am.freescale.net:8181/37684
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Implement ndo_select_queue
+
+Use a very simple selection function for the egress FQ. The purpose
+behind this is to more evenly distribute Tx Confirmation traffic,
+especially in the case of multiple egress flows, when bundling it all on
+CPU 0 would make that CPU a bottleneck.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ibfe8aad7ad5c719cc95d7817d7de6d2094f0f7ed
+Reviewed-on: http://git.am.freescale.net:8181/37685
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
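+A minimal sketch of such a hook against the 4.4-era ndo_select_queue
+signature (the modulo spread is illustrative, not the exact policy):
+
+	static u16 dpaa2_eth_select_queue(struct net_device *dev,
+					  struct sk_buff *skb,
+					  void *accel_priv,
+					  select_queue_fallback_t fallback)
+	{
+		/* Spread flows across Tx queues so confirmations do
+		 * not all funnel onto CPU 0.
+		 */
+		return skb_get_hash(skb) % dev->real_num_tx_queues;
+	}
+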
+dpaa2-eth: Reduce TxConf NAPI weight back to 64
+
+It turns out that not only did the kernel frown upon the old budget of
+256, but the measured values were well below that anyway.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I62ddd3ea1dbfd8b51e2bcb2286e0d5eb10ac7f27
+Reviewed-on: http://git.am.freescale.net:8181/37688
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Try refilling the buffer pool less often
+
+We used to check if the buffer pool needs refilling at each Rx
+frame. Instead, do that check (and the actual buffer release if
+needed) only after a pull dequeue.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: Id52fab83873c40a711b8cadfcf909eb7e2e210f3
+Reviewed-on: http://git.am.freescale.net:8181/38318
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Stay in NAPI if exact budget is met
+
+An off-by-one bug would cause a premature exit from the NAPI cycle.
+Performance degradation is particularly severe in IPFWD cases.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Tested-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I9de2580c7ff8e46cbca9613890b03737add35e26
+Reviewed-on: http://git.am.freescale.net:8181/37908
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Minor changes to FQ stats
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: I0ced0e7b2eee28599cdea79094336c0d44f0d32b
+Reviewed-on: http://git.am.freescale.net:8181/38319
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Support fewer DPIOs than CPUs
+
+The previous DPIO functions would transparently choose a (perhaps
+non-affine) CPU if the required CPU was not available. Now that their API
+contract is enforced, we must make an explicit request for *any* DPIO if
+the request for an *affine* DPIO has failed.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ib08047ffa33518993b1ffa4671d0d4f36d6793d0
+Reviewed-on: http://git.am.freescale.net:8181/38320
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Roy Pledge <roy.pledge at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: cosmetic changes in hashing code
+
+Signed-off-by: Alex Marginean <alexandru.marginean at freescale.com>
+Change-Id: I79e21a69a6fb68cdbdb8d853c059661f8988dbf9
+Reviewed-on: http://git.am.freescale.net:8181/37258
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Prefetch data before initial access
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: Ie8f0163651aea7e3e197a408f89ca98d296d4b8b
+Reviewed-on: http://git.am.freescale.net:8181/38753
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Use netif_receive_skb
+
+netif_rx() is a leftover from our pre-NAPI codebase.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: I02ff0a059862964df1bf81b247853193994c2dfe
+Reviewed-on: http://git.am.freescale.net:8181/38754
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Use napi_alloc_frag() on Rx.
+
+A bit better-suited than netdev_alloc_frag().
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Change-Id: I8863a783502db963e5dc968f049534c36ad484e2
+Reviewed-on: http://git.am.freescale.net:8181/38755
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Silence skb_realloc_headroom() warning
+
+pktgen tests tend to be too noisy because pktgen does not observe the
+net device's needed_headroom specification and we used to be pretty loud
+about that. We'll print the warning message just once.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I3c12eba29c79aa9c487307d367f6d9f4dbe447a3
+Reviewed-on: http://git.am.freescale.net:8181/38756
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Print message upon device unplugging
+
+Give a console notification when a DPNI is unplugged. This is useful for
+automated tests to know the operation (which is not instantaneous) has
+finished.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: If33033201fcee7671ad91c2b56badf3fb56a9e3e
+Reviewed-on: http://git.am.freescale.net:8181/38757
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Add debugfs support
+
+Add debugfs entries for showing detailed per-CPU and per-FQ
+counters for each network interface. Also add a knob for
+resetting these stats.
+The aggregated interface statistics were already available through
+ethtool -S.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I55f5bfe07a15b0d1bf0c6175d8829654163a4318
+Reviewed-on: http://git.am.freescale.net:8181/38758
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: limited support for flow steering
+
+Steering is supported on a sub-set of fields, including DMAC, IP SRC
+and DST, L4 ports.
+Steering and hashing configurations depend on each other, which makes
+the whole thing tricky to configure. Currently FS can be configured
+using only the fields selected for hashing, and all the hashing fields
+must be included in the match key - masking doesn't work yet.
+
+Signed-off-by: Alex Marginean <alexandru.marginean at freescale.com>
+Change-Id: I9fa3199f7818a9a5f9d69d3483ffd839056cc468
+Reviewed-on: http://git.am.freescale.net:8181/38759
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Rename files into the dpaa2 nomenclature
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I1c3d62e2f19a59d4b65727234fd7df2dfd8683d9
+Reviewed-on: http://git.am.freescale.net:8181/38965
+Reviewed-by: Alexandru Marginean <Alexandru.Marginean at freescale.com>
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: dpaa2-eth: migrated remaining flibs for MC fw 8.0.0
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+[Stuart: split eth part into separate patch, updated subject]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Clear 'backup_pool' attribute
+
+New MC-0.7 firmware allows specifying an alternate buffer pool, but we
+are not using that feature for now.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: I0a6e6626512b7bbddfef732c71f1400b67f3e619
+Reviewed-on: http://git.am.freescale.net:8181/39149
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Do programming of MSIs in devm_request_threaded_irq()
+
+With the new dprc_set_obj_irq() we can now program MSIs in the device
+in the callback invoked from devm_request_threaded_irq().
+Since this callback is invoked with interrupts disabled, we need to
+use an atomic portal, instead of the root DPRC's built-in portal
+which is non-atomic.
+
+Signed-off-by: Itai Katz <itai.katz at freescale.com>
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+[Stuart: split original patch into multiple patches]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+dpaa2-eth: Do not map beyond skb tail
+
+On Tx, DMA-map only up to skb->tail, rather than skb->end.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Declare NETIF_F_LLTX as a capability
+
+We are effectively doing lock-less Tx.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Avoid bitcopy of 'backpointers' struct
+
+Make 'struct ldpaa_eth_swa bps' a pointer and avoid copying it on both Tx
+and TxConf.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Use CDANs instead of FQDANs
+
+Use Channel Dequeue Available Notifications (CDANs) instead of
+Frame Queue notifications. We allocate a QMan channel (or DPCON
+object) for each available cpu and assign to it the Rx and Tx conf
+queues associated with that cpu.
+
+We usually want to have affine DPIOs and DPCONs (one for each core).
+If this is not possible due to insufficient resources, we distribute
+all ingress traffic on the cores with affine DPIOs.
+
+NAPI instances are now one per channel instead of one per FQ, as the
+interrupt source changes. Statistics counters change accordingly.
+
+Note that after this commit is applied, one needs to provide sufficient
+DPCON objects (either through the DPL or restool) in order for the Ethernet
+interfaces to work.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Cleanup debugfs statistics
+
+Several minor changes to statistics reporting:
+* Fix print alignment of statistics counters
+* Fix a naming ambiguity in the cpu_stats debugfs ops
+* Add Rx/Tx error counters; these were already used, but not
+reported in the per-CPU stats
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Add tx shaping configuration in sysfs
+
+Egress traffic can be shaped via a per-DPNI SysFS entry:
+ echo M N > /sys/class/net/ni<X>/tx_shaping
+where:
+ M is the maximum throughput, expressed in Mbps.
+ N is the maximum burst size, expressed in bytes, at most 64000.
+
+To remove shaping, use M=0, N=0.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
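+The store hook's input handling can be sketched as follows (the shaping
+wrapper is hypothetical; the MC call behind it is not shown):
+
+	unsigned int mbps, burst;
+
+	/* Expect "M N": Mbps and burst bytes; "0 0" removes shaping. */
+	if (sscanf(buf, "%u %u", &mbps, &burst) != 2 || burst > 64000)
+		return -EINVAL;
+	err = set_tx_shaping(priv, mbps, burst);	/* hypothetical */
+	return err ? err : count;
+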
+dpaa2-eth: Fix "Tx busy" counter
+
+Under heavy egress load, when a large number of the transmitted packets
+cannot be sent because of high portal contention, the "Tx busy" counter
+was not properly incremented.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Fix memory cleanup in case of Tx congestion
+
+The error path of ldpaa_eth_tx() was not properly freeing the SGT buffer
+if the enqueue had failed because of congestion. DMA unmapping was
+missing, too.
+
+Factor the code originally inside the TxConf callback out into a
+separate function that would be called on both TxConf and Tx paths.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Use napi_gro_receive()
+
+Call napi_gro_receive(), effectively enabling GRO.
+NOTE: We could further optimize this by looking ahead in the parse results
+received from hardware and only using GRO when the L3+L4 combination is
+appropriate.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix compilation of Rx Error FQ code
+
+Conditionally-compiled code slipped between cracks when FLibs were
+updated.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: Add Kconfig dependency on DEBUG_FS
+
+The driver's debugfs support depends on the generic CONFIG_DEBUG_FS.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix interface down/up bug
+
+If a networking interface was brought down while still receiving
+ingress traffic, the delay between DPNI disable and NAPI disable
+was not enough to ensure all in-flight frames got processed.
+Instead, some frames were left pending in the Rx queues. If the
+net device was then removed (i.e. restool unbind/unplug), the
+call to dpni_reset() silently failed and the kernel crashed on
+device replugging.
+
+Fix this by increasing the FQ drain time. Also, at ifconfig up
+we enable NAPI before starting the DPNI, to make sure we don't
+miss any early CDANs.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Iterate only through initialized channels
+
+The number of DPIO objects available to a DPNI may be fewer than the
+number of online cores. A typical example would be a DPNI with a
+distribution size smaller than 8. Since we only initialize as many
+channels (DPCONs) as there are DPIOs, iterating through all online cpus
+would produce a nasty oops when retrieving ethtool stats.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+net: pktgen: Observe needed_headroom of the device
+
+Allocate enough space so as not to force the outgoing net device to do
+skb_realloc_headroom().
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+
+dpaa2-eth: Trace buffer pool seeding
+
+Add ftrace support for buffer pool seeding. Individual buffers are
+described by virtual and dma addresses and sizes, as well as by bpid.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Explicitly set carrier off at ifconfig up
+
+If we don't, netif_carrier_ok() will still return true even if the link
+state is marked as LINKWATCH_PENDING, which in a dpni-2-dpni case may
+last indefinitely. This will cause "ifconfig up" followed by "ip
+link show" to report LOWER_UP when the peer DPNI is still down (and in
+fact before we've even received any link notification at all).
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
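+In sketch form, the open path becomes pessimistic about the carrier
+(the dpni_enable() call is sketched from the FLib style, not verified):
+
+	/* Assume link down until a real notification arrives; otherwise
+	 * netif_carrier_ok() keeps reporting a stale "up" state.
+	 */
+	netif_carrier_off(net_dev);
+	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+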
+dpaa2-eth: Fix FQ type in stats print
+
+Fix a bug where the type of the Rx error queue was printed
+incorrectly in the debugfs statistics
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Don't build debugfs support as a separate module
+
+Instead have module init and exit functions declared explicitly for
+the Ethernet driver and initialize/destroy the debugfs directory there.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Remove debugfs #ifdefs from dpaa2-eth.c
+
+Instead of conditionally compiling the calls to debugfs init
+functions in dpaa2-eth.c, define no-op stubs for these functions
+in case the debugfs Kconfig option is not enabled. This makes
+the code more readable.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Use napi_complete_done()
+
+Replace napi_complete() with napi_complete_done().
+
+Together with setting /sys/class/net/ethX/gro_flush_timeout, this
+allows us to take better advantage of GRO coalescing and improves
+throughput and cpu load in TCP termination tests.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
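+The end of the poll routine then looks roughly like this (rearm helper
+name illustrative), and pairs with a non-zero timeout such as
+"echo 20000 > /sys/class/net/ethX/gro_flush_timeout":
+
+	if (cleaned < budget) {
+		/* Reporting the work done lets the core defer the GRO
+		 * flush for up to gro_flush_timeout nanoseconds.
+		 */
+		napi_complete_done(napi, cleaned);
+		rearm_cdan(ch);
+	}
+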
+dpaa2-eth: Fix error path in probe
+
+NAPI delete was called at the wrong place when exiting probe
+function on an error path
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Allocate channels based on queue count
+
+Limit the number of channels allocated per DPNI to the maximum
+between the number of Rx queues per traffic class (distribution size)
+and Tx confirmation queues (number of tx flows).
+If this happens to be larger than the number of available cores, only
+allocate one channel for each core and distribute the frame queues on
+the cores/channels in a round robin fashion.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
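+The wrap-around distribution amounts to a one-liner per queue (a sketch
+under the assumption that channels are stored in an array):
+
+	/* More FQs than channels: share channels round-robin so every
+	 * frame queue still gets serviced.
+	 */
+	for (i = 0; i < priv->num_fqs; i++)
+		priv->fq[i].channel = priv->channel[i % priv->num_channels];
+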
+dpaa2-eth: Use DPNI setting for number of Tx flows
+
+Instead of creating one Tx flow for each online cpu, use the DPNI
+attributes for deciding how many senders we have.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Renounce sentinel in enum dpni_counter
+
+Bring back the Flib header dpni.h to its initial content by removing the
+sentinel value in enum dpni_counter.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix Rx queue count
+
+We were missing a roundup to the next power of 2 in order to be in sync
+with the MC implementation.
+We actually moved that logic into a separate function, which we'll
+remove once the MC API is updated.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
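+The interim helper reduces to a power-of-two roundup (the attribute
+field name is borrowed from the max_dist_per_tc discussion elsewhere in
+this series, so treat it as illustrative):
+
+	/* Mirror the MC's internal rounding until the API reports the
+	 * effective queue count itself.
+	 */
+	static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+	{
+		return roundup_pow_of_two(priv->dpni_attrs.max_dist_per_tc);
+	}
+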
+dpaa2-eth: Unmap the S/G table outside ldpaa_eth_free_rx_fd
+
+The Scatter-Gather table is already unmapped outside ldpaa_eth_free_rx_fd,
+so there is no need to try to unmap it once more.
+
+Signed-off-by: Cristian Sovaiala <cristian.sovaiala at freescale.com>
+
+dpaa2-eth: Use napi_schedule_irqoff()
+
+At the time we schedule NAPI, the Dequeue Available Notifications (which
+are the de facto triggers of NAPI processing) are already disabled.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+net: Fix ethernet Kconfig
+
+Re-add missing 'source' directive. This exists on the integration
+branch, but was mistakenly removed by an earlier dpaa2-eth rebase.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Manually update link state at ifup
+
+The DPMAC may have handled the link state notification before the DPNI
+is up. A new PHY state transition may not subsequently occur, so the
+DPNI must initiate a read of the DPMAC state.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Stop carrier upon ifdown
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix print messages in link state handling code
+
+Avoid an "(uninitialized)" message during DPNI probe by replacing
+netdev_info() with its corresponding dev_info().
+Purge some related comments and add some netdev messages to assist
+link state debugging.
+Remove an excessively defensive assertion.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Do not allow ethtool settings change while the NI is up
+
+Due to an MC limitation, link state changes while the DPNI is enabled
+will fail. For now, we'll just prevent the call from going down to the MC
+if we know it will fail.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Reduce ethtool messages verbosity
+
+Transform a couple of netdev_info() calls into netdev_dbg().
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Only unmask IRQs that we actually handle
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Produce fewer boot log messages
+
+No longer print one line for each all-zero hwaddr that was replaced with
+a random MAC address; just inform the user once that this has occurred.
+Also reduce the redundancy of some printouts in the boot log.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix big endian issue
+
+We were not doing any endianness conversions on the scatter gather
+table entries, which caused problems on big endian kernels.
+
+For frame descriptors the QMan driver takes care of this transparently,
+but in the case of SG entries we need to do it ourselves.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
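+In sketch form, every field read from or written to a hardware S/G
+entry gets an explicit conversion (the entry's field and mask names
+here are hypothetical):
+
+	/* The hardware table is little-endian regardless of the CPU,
+	 * so convert on both the build and the parse side.
+	 */
+	sge->addr = cpu_to_le64(dma_addr);
+	len = le32_to_cpu(sge->len) & SG_LENGTH_MASK;
+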
+dpaa2-eth: Force atomic context for lazy bpool seeding
+
+We use the same ldpaa_bp_add_7() function for initial buffer pool
+seeding (from .ndo_open) and for hotpath pool replenishing. The function
+is using napi_alloc_frag() as an optimization for the Rx datapath, but
+that turns out to require atomic execution because of a this_cpu_ptr()
+call down its stack.
+This patch temporarily disables preemption around the initial seeding of
+the Rx buffer pool.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa-eth: Integrate Flib version 0.7.1.2
+
+Although API-compatible with 0.7.1.1, there are some ABI changes
+that warrant a new integration.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: No longer adjust max_dist_per_tc
+
+The MC firmware until version 0.7.1.1/8.0.2 requires that
+max_dist_per_tc have the value expected by the hardware, which would be
+different from what the user expects. MC firmware 0.7.1.2/8.0.5 fixes
+that, so we remove our transparent conversion.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Enforce 256-byte Rx alignment
+
+Hardware erratum enforced by MC requires that Rx buffer lengths and
+addresses be 256-byte aligned.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Rename Tx buf alignment macro
+
+The existing "BUF_ALIGN" macro remained confined to Tx usage, after
+separate alignment was introduced for Rx. Renaming accordingly.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Fix hashing distribution size
+
+Commit be3fb62623e4338e60fb60019f134b6055cbc127
+Author: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Date: Fri Oct 23 18:26:44 2015 +0300
+
+ dpaa2-eth: No longer adjust max_dist_per_tc
+
+missed one usage of the ldpaa_queue_count() function, making
+distribution size inadvertently lower.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Remove ndo_select_queue
+
+Our implementation of ndo_select_queue would lead to questions regarding
+our support for qdiscs. Until we find an optimal way to select the txq
+without breaking future qdisc integration, just remove the
+ndo_select_queue callback entirely and let the stack figure out the
+flow.
+This incurs a ~2-3% penalty on some performance tests.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Select TxConf FQ based on processor id
+
+Use smp_processor_id instead of skb queue mapping to determine the tx
+flow id and implicitly the confirmation queue.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+dpaa2-eth: Reduce number of buffers in bpool
+
+Reduce the maximum number of buffers in each buffer pool associated
+with a DPNI. This in turn reduces the number of memory allocations
+performed in a single batch when buffers fall below a certain
+threshold.
+
+Provides a significant performance boost (~5-10% increase) on both
+termination and forwarding scenarios, while also reducing the driver
+memory footprint.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Replace "ldpaa" with "dpaa2"
+
+Replace all instances of "ldpaa"/"LDPAA" in the Ethernet driver
+(names of functions, structures, macros, etc), with "dpaa2"/"DPAA2",
+except for DPIO API function calls.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: rename ldpaa to dpaa2
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+(Stuart: this patch was split out from the origin global rename patch)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+dpaa2-eth: Rename dpaa_io_query_fq_count to dpaa2_io_query_fq_count
+
+Signed-off-by: Cristian Sovaiala <cristian.sovaiala at freescale.com>
+
+fsl-dpio: rename dpaa_* structure to dpaa2_*
+
+Signed-off-by: Haiying Wang <Haiying.wang at freescale.com>
+
+dpaa2-eth, dpni, fsl-mc: Updates for MC0.8.0
+
+Several changes need to be performed in sync for supporting
+the newest MC version:
+* Update mc-cmd.h
+* Update the dpni binary interface to v6.0
+* Update the DPAA2 Eth driver to account for several API changes
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+staging: fsl-dpaa2: ethernet: add support for hardware timestamping
+
+Signed-off-by: Yangbo Lu <yangbo.lu at nxp.com>
+
+fsl-dpaa2: eth: Do not set bpid in egress fd
+
+We don't do FD recycling on egress; the BPID is therefore not necessary.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Amend buffer refill comment
+
+A change request has been pending for placing an upper bound on the
+buffer replenish logic on Rx. However, short of practical alternatives,
+we resort to amending the relevant comment and rely on ksoftirqd to
+guarantee interactivity.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Configure a taildrop threshold for each Rx frame queue.
+
+The selected value allows for Rx jumbo (10K) frame processing,
+while at the same time helping to balance the system in the case of
+IP forwarding.
+
+Also compute the number of buffers in the pool based on the TD
+threshold to avoid starving some of the ingress queues in small
+frames, high throughput scenarios.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Check objects' FLIB version
+
+Make sure we support the DPNI, DPCON and DPBP version, otherwise
+abort probing early on and provide an error message.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Remove likely/unlikely from cold paths
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Remove __cold attribute
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+
+fsl-dpaa2: eth: Replace netdev_XXX with dev_XXX before register_netdevice()
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Fix coccinelle issue
+
+drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c:687:1-36: WARNING:
+Assignment of bool to 0/1
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+
+fsl-dpaa2: eth: Fix minor spelling issue
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+
+fsl-dpaa2: eth: Add a couple of 'unlikely' on hot path
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Fix a bunch of minor issues found by static analysis tools
+
+As found by Klocworks and Checkpatch:
+ - Unused variables
+ - Integer type replacements
+ - Unchecked memory allocations
+ - Whitespace, alignment and newlining
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Remove "inline" keyword from static functions
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+
+fsl-dpaa2: eth: Remove BUG/BUG_ONs
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Use NAPI_POLL_WEIGHT
+
+No need to define our own macro as long as we're using the
+default value of 64.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Move dpaa2_eth_swa structure to header file
+
+It was the only structure defined inside dpaa2-eth.c
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Replace uintX_t with uX
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Minor fixes & cosmetics
+
+- Make driver log level an int, because this is what
+ netif_msg_init expects.
+- Remove driver description macro as it was used only once,
+ immediately after being defined
+- Remove include comment
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Move bcast address setup to dpaa2_eth_netdev_init
+
+It seems to fit better there than directly in probe.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Fix DMA mapping bug
+
+During hashing/flow steering configuration via ethtool, we were
+doing a DMA unmap from the wrong address. Fix the issue by using
+the DMA address that was initially mapped.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Associate buffer counting to queues instead of cpu
+
+Move the buffer counters from being percpu variables to being
+associated with QMan channels. This is more natural as we need
+to dimension the buffer pool count based on distribution size
+rather than number of online cores.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Provide driver and fw version to ethtool
+
+Read fw version from the MC and interpret DPNI FLib major.minor as the
+driver's version. Report these in 'ethtool -i'.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Remove dependency on GCOV_KERNEL
+
+Signed-off-by: Cristian Sovaiala <cristi.sovaiala at nxp.com>
+
+fsl-dpaa2: eth: Remove FIXME/TODO comments from the code
+
+Some of the concerns had already been addressed, a couple are being
+fixed in place.
+Left a few TODOs related to the flow-steering code, which needs to be
+revisited before upstreaming anyway.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Remove forward declarations
+
+Instead move the functions such that they are defined prior to
+being used.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Remove dead code in IRQ handler
+
+If any of those conditions were met, it is unlikely we'd ever be there
+in the first place.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Remove dpaa2_dpbp_drain()
+
+Its sole caller was __dpaa2_dpbp_free(), so move its content and get rid
+of one function call.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Remove duplicate define
+
+We somehow ended up with two defines for the maximum number
+of tx queues.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Move header comment to .c file
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Make DPCON allocation failure produce a benign message
+
+Number of DPCONs may be smaller than the number of CPUs in a number of
+valid scenarios. One such scenario is when the DPNI's distribution width
+is smaller than the number of cores and we just don't want to
+over-allocate DPCONs.
+Make the DPCON allocation failure less menacing by changing the logged
+message.
+
+While at it, remove an unused parameter from the function prototype.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+dpaa2 eth: irq update
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+Conflicts:
+ drivers/staging/Kconfig
+ drivers/staging/Makefile
+---
+ MAINTAINERS | 15 +
+ drivers/staging/Kconfig | 2 +
+ drivers/staging/Makefile | 1 +
+ drivers/staging/fsl-dpaa2/Kconfig | 11 +
+ drivers/staging/fsl-dpaa2/Makefile | 5 +
+ drivers/staging/fsl-dpaa2/ethernet/Kconfig | 42 +
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 319 +++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2793 ++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 366 +++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 882 +++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 ++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 +++++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 ++++++++++++++++++
+ drivers/staging/fsl-mc/include/mc-cmd.h | 5 +-
+ drivers/staging/fsl-mc/include/net.h | 481 ++++
+ net/core/pktgen.c | 1 +
+ 20 files changed, 10910 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/staging/fsl-dpaa2/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
+ create mode 100644 drivers/staging/fsl-mc/include/net.h
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4539,6 +4539,21 @@ L: linux-kernel at vger.kernel.org
+ S: Maintained
+ F: drivers/staging/fsl-mc/
+
++FREESCALE DPAA2 ETH DRIVER
++M: Ioana Radulescu <ruxandra.radulescu at freescale.com>
++M: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
++M: Cristian Sovaiala <cristian.sovaiala at freescale.com>
++L: linux-kernel at vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/ethernet/
++
++FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER
++M: Lijun Pan <Lijun.Pan at freescale.com>
++L: linux-kernel at vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-mc/bus/mc-ioctl.h
++F: drivers/staging/fsl-mc/bus/mc-restool.c
++
+ FREEVXFS FILESYSTEM
+ M: Christoph Hellwig <hch at infradead.org>
+ W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -114,4 +114,6 @@ source "drivers/staging/most/Kconfig"
+
+ source "drivers/staging/fsl_ppfe/Kconfig"
+
++source "drivers/staging/fsl-dpaa2/Kconfig"
++
+ endif # STAGING
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -49,3 +49,4 @@ obj-$(CONFIG_FSL_DPA) += fsl_q
+ obj-$(CONFIG_WILC1000) += wilc1000/
+ obj-$(CONFIG_MOST) += most/
+ obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/
++obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/Kconfig
+@@ -0,0 +1,11 @@
++#
++# Freescale device configuration
++#
++
++config FSL_DPAA2
++ bool "Freescale DPAA2 devices"
++ depends on FSL_MC_BUS
++ ---help---
++ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs.
++# TODO move DPIO driver in-here?
++source "drivers/staging/fsl-dpaa2/ethernet/Kconfig"
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for the Freescale network device drivers.
++#
++
++obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig
+@@ -0,0 +1,42 @@
++#
++# Freescale DPAA Ethernet driver configuration
++#
++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
++#
++# This file is released under the GPLv2
++#
++
++menuconfig FSL_DPAA2_ETH
++ tristate "Freescale DPAA2 Ethernet"
++ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO
++ select FSL_DPAA2_MAC
++ default y
++ ---help---
++ Freescale Data Path Acceleration Architecture Ethernet
++ driver, using the Freescale MC bus driver.
++
++if FSL_DPAA2_ETH
++config FSL_DPAA2_ETH_LINK_POLL
++ bool "Use polling mode for link state"
++ default n
++ ---help---
++ Poll for detecting link state changes instead of using
++ interrupts.
++
++config FSL_DPAA2_ETH_USE_ERR_QUEUE
++ bool "Enable Rx error queue"
++ default n
++ ---help---
++ Allow Rx error frames to be enqueued on an error queue
++ and processed by the driver (by default they are dropped
++ in hardware).
++ This may impact performance, recommended for debugging
++ purposes only.
++
++config FSL_DPAA2_ETH_DEBUGFS
++ depends on DEBUG_FS && FSL_QBMAN_DEBUG
++ bool "Enable debugfs support"
++ default n
++ ---help---
++ Enable advanced statistics through debugfs interface.
++endif
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
+@@ -0,0 +1,21 @@
++#
++# Makefile for the Freescale DPAA Ethernet controllers
++#
++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
++#
++# This file is released under the GPLv2
++#
++
++ccflags-y += -DVERSION=\"\"
++
++obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
++
++fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
++
++#Needed by the tracing framework
++CFLAGS_dpaa2-eth.o := -I$(src)
++
++ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y)
++ GCOV_PROFILE := y
++endif
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+@@ -0,0 +1,319 @@
++
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-debugfs.h"
++
++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
++
++
++static struct dentry *dpaa2_dbg_root;
++
++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct rtnl_link_stats64 *stats;
++ struct dpaa2_eth_stats *extras;
++ int i;
++
++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
++ "Tx SG", "Enq busy");
++
++ for_each_online_cpu(i) {
++ stats = per_cpu_ptr(priv->percpu_stats, i);
++ extras = per_cpu_ptr(priv->percpu_extras, i);
++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++ i,
++ stats->rx_packets,
++ stats->rx_errors,
++ extras->rx_sg_frames,
++ stats->tx_packets,
++ stats->tx_errors,
++ extras->tx_conf_frames,
++ extras->tx_sg_frames,
++ extras->tx_portal_busy);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_cpu_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_cpu_ops = {
++ .open = dpaa2_dbg_cpu_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++{
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ return "Rx";
++ case DPAA2_TX_CONF_FQ:
++ return "Tx conf";
++ case DPAA2_RX_ERR_FQ:
++ return "Rx err";
++ default:
++ return "N/A";
++ }
++}
++
++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_fq *fq;
++ u32 fcnt, bcnt;
++ int i, err;
++
++ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s\n",
++ "VFQID", "CPU", "Type", "Frames", "Pending frames");
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
++ if (err)
++ fcnt = 0;
++
++ seq_printf(file, "%5d%16d%16s%16llu%16u\n",
++ fq->fqid,
++ fq->target_cpu,
++ fq_type_to_str(fq),
++ fq->stats.frames,
++ fcnt);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_fqs_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_fq_ops = {
++ .open = dpaa2_dbg_fqs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
++ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
++ "Avg frm/CDAN");
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
++ ch->ch_id,
++ ch->nctx.desired_cpu,
++ ch->stats.dequeue_portal_busy,
++ ch->stats.frames,
++ ch->stats.cdan,
++ ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0);
++ }
++
++ return 0;
++}
++
++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++{
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++
++ err = single_open(file, dpaa2_dbg_ch_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
++
++ return err;
++}
++
++static const struct file_operations dpaa2_dbg_ch_ops = {
++ .open = dpaa2_dbg_ch_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct dpaa2_eth_priv *priv = file->private_data;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_stats *percpu_extras;
++ struct dpaa2_eth_fq *fq;
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for_each_online_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ memset(percpu_stats, 0, sizeof(*percpu_stats));
++
++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
++ memset(percpu_extras, 0, sizeof(*percpu_extras));
++ }
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ memset(&fq->stats, 0, sizeof(fq->stats));
++ }
++
++ for_each_cpu(i, &priv->dpio_cpumask) {
++ ch = priv->channel[i];
++ memset(&ch->stats, 0, sizeof(ch->stats));
++ }
++
++ return count;
++}
++
++static const struct file_operations dpaa2_dbg_reset_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_write,
++};
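++
++/* The files above appear under debugfs, e.g. (assuming debugfs is mounted
++ * at the usual /sys/kernel/debug):
++ *   cat /sys/kernel/debug/dpaa2-eth/<netdev>/cpu_stats
++ *   echo 1 > /sys/kernel/debug/dpaa2-eth/<netdev>/reset_stats
++ */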
++
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++{
++ if (!dpaa2_dbg_root)
++ return;
++
++ /* Create a directory for the interface */
++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
++ dpaa2_dbg_root);
++ if (!priv->dbg.dir) {
++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++ return;
++ }
++
++ /* per-cpu stats file */
++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_cpu_ops);
++ if (!priv->dbg.cpu_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_cpu_stats;
++ }
++
++ /* per-fq stats file */
++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_fq_ops);
++ if (!priv->dbg.fq_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_fq_stats;
++ }
++
++ /* per-channel stats file */
++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_ch_ops);
++ if (!priv->dbg.ch_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_ch_stats;
++ }
++
++ /* reset stats */
++ priv->dbg.reset_stats = debugfs_create_file("reset_stats", S_IWUSR,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_ops);
++ if (!priv->dbg.reset_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_stats;
++ }
++
++ return;
++
++err_reset_stats:
++ debugfs_remove(priv->dbg.ch_stats);
++err_ch_stats:
++ debugfs_remove(priv->dbg.fq_stats);
++err_fq_stats:
++ debugfs_remove(priv->dbg.cpu_stats);
++err_cpu_stats:
++ debugfs_remove(priv->dbg.dir);
++}
++
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
++{
++ debugfs_remove(priv->dbg.reset_stats);
++ debugfs_remove(priv->dbg.fq_stats);
++ debugfs_remove(priv->dbg.ch_stats);
++ debugfs_remove(priv->dbg.cpu_stats);
++ debugfs_remove(priv->dbg.dir);
++}
++
++void dpaa2_eth_dbg_init(void)
++{
++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
++ if (!dpaa2_dbg_root) {
++ pr_err("DPAA2-ETH: debugfs create failed\n");
++ return;
++ }
++
++ pr_info("DPAA2-ETH: debugfs created\n");
++}
++
++void __exit dpaa2_eth_dbg_exit(void)
++{
++ debugfs_remove(dpaa2_dbg_root);
++}
++
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+@@ -0,0 +1,61 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef DPAA2_ETH_DEBUGFS_H
++#define DPAA2_ETH_DEBUGFS_H
++
++#include <linux/dcache.h>
++#include "dpaa2-eth.h"
++
++extern struct dpaa2_eth_priv *priv;
++
++struct dpaa2_debugfs {
++ struct dentry *dir;
++ struct dentry *fq_stats;
++ struct dentry *ch_stats;
++ struct dentry *cpu_stats;
++ struct dentry *reset_stats;
++};
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++void dpaa2_eth_dbg_init(void);
++void dpaa2_eth_dbg_exit(void);
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
++#else
++static inline void dpaa2_eth_dbg_init(void) {}
++static inline void dpaa2_eth_dbg_exit(void) {}
++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
++
++#endif /* DPAA2_ETH_DEBUGFS_H */
++
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+@@ -0,0 +1,185 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM dpaa2_eth
++
++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _DPAA2_ETH_TRACE_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include "dpaa2-eth.h"
++#include <linux/tracepoint.h>
++
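++/* trace_printk format for frame descriptor event class */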
++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
++/* trace_printk format for raw buffer event class */
++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
++
++/* This is used to declare a class of events.
++ * Individual events of this type will be defined below.
++ */
++
++/* Store details about a frame descriptor */
++DECLARE_EVENT_CLASS(dpaa2_eth_fd,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ /* Repeat argument list here */
++ TP_ARGS(netdev, fd),
++
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(u64, fd_addr)
++ __field(u32, fd_len)
++ __field(u16, fd_offset)
++ __string(name, netdev->name)
++ ),
++
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->fd_addr = dpaa2_fd_get_addr(fd);
++ __entry->fd_len = dpaa2_fd_get_len(fd);
++ __entry->fd_offset = dpaa2_fd_get_offset(fd);
++ __assign_str(name, netdev->name);
++ ),
++
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_FMT,
++ __get_str(name),
++ __entry->fd_addr,
++ __entry->fd_len,
++ __entry->fd_offset)
++);
++
++/* Now declare events of the above type. Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
++
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Rx fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
++
++ TP_ARGS(netdev, fd)
++);
++
++/* Log data about raw buffers. Useful for tracing DPBP content. */
++TRACE_EVENT(dpaa2_eth_buf_seed,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ /* virtual address and size */
++ void *vaddr,
++ size_t size,
++ /* dma map address and size */
++ dma_addr_t dma_addr,
++ size_t map_size,
++ /* buffer pool id, if relevant */
++ u16 bpid),
++
++ /* Repeat argument list here */
++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
++
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(void *, vaddr)
++ __field(size_t, size)
++ __field(dma_addr_t, dma_addr)
++ __field(size_t, map_size)
++ __field(u16, bpid)
++ __string(name, netdev->name)
++ ),
++
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->vaddr = vaddr;
++ __entry->size = size;
++ __entry->dma_addr = dma_addr;
++ __entry->map_size = map_size;
++ __entry->bpid = bpid;
++ __assign_str(name, netdev->name);
++ ),
++
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_BUF_FMT,
++ __get_str(name),
++ __entry->vaddr,
++ __entry->size,
++ &__entry->dma_addr,
++ __entry->map_size,
++ __entry->bpid)
++);
++
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
++
++#endif /* _DPAA2_ETH_TRACE_H */
++
++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa2-eth-trace
++#include <trace/define_trace.h>
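++
++/* Once built in, these events can be toggled at runtime through tracefs,
++ * e.g. (path assumes the usual tracefs mount under debugfs):
++ *   echo 1 > /sys/kernel/debug/tracing/events/dpaa2_eth/enable
++ */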
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -0,0 +1,2793 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/etherdevice.h>
++#include <linux/of_net.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/debugfs.h>
++#include <linux/kthread.h>
++#include <linux/net_tstamp.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++#include "dpaa2-eth.h"
++
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
++ * using these trace events need only #include "dpaa2-eth-trace.h"
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa2-eth-trace.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
++
++static int debug = -1;
++module_param(debug, int, S_IRUGO);
++MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
++
++/* Oldest DPAA2 objects version we are compatible with */
++#define DPAA2_SUPPORTED_DPNI_VERSION 6
++#define DPAA2_SUPPORTED_DPBP_VERSION 2
++#define DPAA2_SUPPORTED_DPCON_VERSION 2
++
++/* Iterate through the cpumask in a round-robin fashion. */
++#define cpumask_rr(cpu, maskptr) \
++do { \
++ (cpu) = cpumask_next((cpu), (maskptr)); \
++ if ((cpu) >= nr_cpu_ids) \
++ (cpu) = cpumask_first((maskptr)); \
++} while (0)
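++
++/* A minimal illustration of cpumask_rr() (hypothetical helper names):
++ *
++ *	cpu = cpumask_first(&priv->dpio_cpumask);
++ *	for (i = 0; i < n_items; i++) {
++ *		assign_to_cpu(item[i], cpu);
++ *		cpumask_rr(cpu, &priv->dpio_cpumask);
++ *	}
++ */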
++
++static void dpaa2_eth_rx_csum(struct dpaa2_eth_priv *priv,
++ u32 fd_status,
++ struct sk_buff *skb)
++{
++ skb_checksum_none_assert(skb);
++
++ /* HW checksum validation is disabled, nothing to do here */
++ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
++ return;
++
++ /* Read checksum validation bits */
++ if (!((fd_status & DPAA2_ETH_FAS_L3CV) &&
++ (fd_status & DPAA2_ETH_FAS_L4CV)))
++ return;
++
++ /* Inform the stack there's no need to compute L3/L4 csum anymore */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
++/* Free a received FD.
++ * Not to be used for Tx conf FDs or on any other paths.
++ */
++static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ void *vaddr)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++
++ if (fd_format == dpaa2_fd_sg) {
++ struct dpaa2_sg_entry *sgt = vaddr + dpaa2_fd_get_offset(fd);
++ void *sg_vaddr;
++ int i;
++
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ dpaa2_sg_le_to_cpu(&sgt[i]);
++
++ addr = dpaa2_sg_get_addr(&sgt[i]);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++
++ sg_vaddr = phys_to_virt(addr);
++ put_page(virt_to_head_page(sg_vaddr));
++
++ if (dpaa2_sg_is_final(&sgt[i]))
++ break;
++ }
++ }
++
++ put_page(virt_to_head_page(vaddr));
++}
++
++/* Build a linear skb based on a single-buffer frame descriptor */
++static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ void *fd_vaddr)
++{
++ struct sk_buff *skb = NULL;
++ u16 fd_offset = dpaa2_fd_get_offset(fd);
++ u32 fd_length = dpaa2_fd_get_len(fd);
++
++ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUFFER_SIZE +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
++ if (unlikely(!skb)) {
++ netdev_err(priv->net_dev, "build_skb() failed\n");
++ return NULL;
++ }
++
++ skb_reserve(skb, fd_offset);
++ skb_put(skb, fd_length);
++
++ ch->buf_count--;
++
++ return skb;
++}
++
++/* Build a non linear (fragmented) skb based on a S/G table */
++static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ struct dpaa2_sg_entry *sgt)
++{
++ struct sk_buff *skb = NULL;
++ struct device *dev = priv->net_dev->dev.parent;
++ void *sg_vaddr;
++ dma_addr_t sg_addr;
++ u16 sg_offset;
++ u32 sg_length;
++ struct page *page, *head_page;
++ int page_offset;
++ int i;
++
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ struct dpaa2_sg_entry *sge = &sgt[i];
++
++ dpaa2_sg_le_to_cpu(sge);
++
++ /* We don't support anything else yet! */
++ if (unlikely(dpaa2_sg_get_format(sge) != dpaa2_sg_single)) {
++ dev_warn_once(dev, "Unsupported S/G entry format: %d\n",
++ dpaa2_sg_get_format(sge));
++ return NULL;
++ }
++
++ /* Get the address, offset and length from the S/G entry */
++ sg_addr = dpaa2_sg_get_addr(sge);
++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, sg_addr))) {
++ netdev_err(priv->net_dev, "DMA unmap failed\n");
++ return NULL;
++ }
++ sg_vaddr = phys_to_virt(sg_addr);
++ sg_length = dpaa2_sg_get_len(sge);
++
++ if (i == 0) {
++ /* We build the skb around the first data buffer */
++ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUFFER_SIZE +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
++ if (unlikely(!skb)) {
++ netdev_err(priv->net_dev, "build_skb failed\n");
++ return NULL;
++ }
++ sg_offset = dpaa2_sg_get_offset(sge);
++ skb_reserve(skb, sg_offset);
++ skb_put(skb, sg_length);
++ } else {
++ /* Data in subsequent S/G entries is stored at
++ * offset 0 in their buffers, so we don't need
++ * to compute sg_offset.
++ */
++ WARN_ONCE(dpaa2_sg_get_offset(sge) != 0,
++ "Non-zero offset in SGE[%d]!\n", i);
++
++ /* Rest of the data buffers are stored as skb frags */
++ page = virt_to_page(sg_vaddr);
++ head_page = virt_to_head_page(sg_vaddr);
++
++ /* Offset in page (which may be compound) */
++ page_offset = ((unsigned long)sg_vaddr &
++ (PAGE_SIZE - 1)) +
++ (page_address(page) - page_address(head_page));
++
++ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
++ sg_length, DPAA2_ETH_RX_BUFFER_SIZE);
++ }
++
++ if (dpaa2_sg_is_final(sge))
++ break;
++ }
++
++ /* Count all the data buffers (i is the index of the final SGE,
++ * so there are i + 1 of them) plus the SGT buffer itself.
++ */
++ ch->buf_count -= i + 2;
++
++ return skb;
++}
++
++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi)
++{
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ void *vaddr;
++ struct sk_buff *skb;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_stats *percpu_extras;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpaa2_fas *fas;
++ u32 status = 0;
++
++ /* Tracing point */
++ trace_dpaa2_rx_fd(priv->net_dev, fd);
++
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
++ vaddr = phys_to_virt(addr);
++
++ prefetch(vaddr + priv->buf_layout.private_data_size);
++ prefetch(vaddr + dpaa2_fd_get_offset(fd));
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ if (fd_format == dpaa2_fd_single) {
++ skb = dpaa2_eth_build_linear_skb(priv, ch, fd, vaddr);
++ } else if (fd_format == dpaa2_fd_sg) {
++ struct dpaa2_sg_entry *sgt =
++ vaddr + dpaa2_fd_get_offset(fd);
++ skb = dpaa2_eth_build_frag_skb(priv, ch, sgt);
++ put_page(virt_to_head_page(vaddr));
++ percpu_extras->rx_sg_frames++;
++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
++ } else {
++ /* We don't support any other format */
++ netdev_err(priv->net_dev, "Received invalid frame format\n");
++ goto err_frame_format;
++ }
++
++ if (unlikely(!skb)) {
++ dev_err_once(dev, "error building skb\n");
++ goto err_build_skb;
++ }
++
++ prefetch(skb->data);
++
++ if (priv->ts_rx_en) {
++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
++ u64 *ns = (u64 *) (vaddr +
++ priv->buf_layout.private_data_size +
++ sizeof(struct dpaa2_fas));
++
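++ /* The raw value is a tick count of the nominal-frequency 1588
++ * timer; scale it by the tick period to obtain nanoseconds.
++ */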
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns);
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
++ }
++
++ /* Check if we need to validate the L4 csum */
++ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) {
++ fas = (struct dpaa2_fas *)
++ (vaddr + priv->buf_layout.private_data_size);
++ status = le32_to_cpu(fas->status);
++ dpaa2_eth_rx_csum(priv, status, skb);
++ }
++
++ skb->protocol = eth_type_trans(skb, priv->net_dev);
++
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += skb->len;
++
++ if (priv->net_dev->features & NETIF_F_GRO)
++ napi_gro_receive(napi, skb);
++ else
++ netif_receive_skb(skb);
++
++ return;
++err_frame_format:
++err_build_skb:
++ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
++ percpu_stats->rx_dropped++;
++}
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ void *vaddr;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_fas *fas;
++ u32 status = 0;
++
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
++ vaddr = phys_to_virt(addr);
++
++ if (fd->simple.frc & DPAA2_FD_FRC_FASV) {
++ fas = (struct dpaa2_fas *)
++ (vaddr + priv->buf_layout.private_data_size);
++ status = le32_to_cpu(fas->status);
++
++ /* All frames received on this queue should have at least
++ * one of the Rx error bits set */
++ WARN_ON_ONCE((status & DPAA2_ETH_RX_ERR_MASK) == 0);
++ netdev_dbg(priv->net_dev, "Rx frame error: 0x%08x\n",
++ status & DPAA2_ETH_RX_ERR_MASK);
++ }
++ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_stats->rx_errors++;
++}
++#endif
++
++/* Consume all frames pull-dequeued into the store. This is the simplest way to
++ * make sure we don't accidentally issue another volatile dequeue which would
++ * overwrite (leak) frames already in the store.
++ *
++ * Observance of NAPI budget is not our concern, leaving that to the caller.
++ */
++static int dpaa2_eth_store_consume(struct dpaa2_eth_channel *ch)
++{
++ struct dpaa2_eth_priv *priv = ch->priv;
++ struct dpaa2_eth_fq *fq;
++ struct dpaa2_dq *dq;
++ const struct dpaa2_fd *fd;
++ int cleaned = 0;
++ int is_last;
++
++ do {
++ dq = dpaa2_io_store_next(ch->store, &is_last);
++ if (unlikely(!dq)) {
++ if (unlikely(!is_last)) {
++ netdev_dbg(priv->net_dev,
++ "Channel %d reqturned no valid frames\n",
++ ch->ch_id);
++ /* MUST retry until we get some sort of
++ * valid response token (be it "empty dequeue"
++ * or a valid frame).
++ */
++ continue;
++ }
++ break;
++ }
++
++ /* Obtain FD and process it */
++ fd = dpaa2_dq_fd(dq);
++ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++ fq->stats.frames++;
++
++ fq->consume(priv, ch, fd, &ch->napi);
++ cleaned++;
++ } while (!is_last);
++
++ return cleaned;
++}
++
++static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ void *sgt_buf = NULL;
++ dma_addr_t addr;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct dpaa2_sg_entry *sgt;
++ int i, j, err;
++ int sgt_buf_size;
++ struct scatterlist *scl, *crt_scl;
++ int num_sg;
++ int num_dma_bufs;
++ struct dpaa2_eth_swa *bps;
++
++ /* Create and map scatterlist.
++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
++ * to go beyond nr_frags+1.
++ * Note: We don't support chained scatterlists
++ */
++ WARN_ON(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1);
++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
++ if (unlikely(!scl))
++ return -ENOMEM;
++
++ sg_init_table(scl, nr_frags + 1);
++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ if (unlikely(!num_dma_bufs)) {
++ netdev_err(priv->net_dev, "dma_map_sg() error\n");
++ err = -ENOMEM;
++ goto dma_map_sg_failed;
++ }
++
++ /* Prepare the HW SGT structure */
++ sgt_buf_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
++ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++ if (unlikely(!sgt_buf)) {
++ netdev_err(priv->net_dev, "failed to allocate SGT buffer\n");
++ err = -ENOMEM;
++ goto sgt_buf_alloc_failed;
++ }
++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++
++ /* PTA from egress side is passed as is to the confirmation side so
++ * we need to clear some fields here in order to find consistent values
++ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
++ * field here.
++ */
++ memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8);
++
++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
++
++ /* Fill in the HW SGT structure.
++ *
++ * sgt_buf is zeroed out, so the following fields are implicit
++ * in all sgt entries:
++ * - offset is 0
++ * - format is 'dpaa2_sg_single'
++ */
++ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
++ }
++ dpaa2_sg_set_final(&sgt[i - 1], true);
++
++ /* Store the skb backpointer in the SGT buffer.
++ * Fit the scatterlist and the number of buffers alongside the
++ * skb backpointer in the SWA. We'll need all of them on Tx Conf.
++ */
++ bps = (struct dpaa2_eth_swa *)sgt_buf;
++ bps->skb = skb;
++ bps->scl = scl;
++ bps->num_sg = num_sg;
++ bps->num_dma_bufs = num_dma_bufs;
++
++ for (j = 0; j < i; j++)
++ dpaa2_sg_cpu_to_le(&sgt[j]);
++
++ /* Separately map the SGT buffer */
++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ netdev_err(priv->net_dev, "dma_map_single() failed\n");
++ err = -ENOMEM;
++ goto dma_map_single_failed;
++ }
++ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
++ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_len(fd, skb->len);
++
++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
++ DPAA2_FD_CTRL_PTV1;
++
++ return 0;
++
++dma_map_single_failed:
++ kfree(sgt_buf);
++sgt_buf_alloc_failed:
++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++dma_map_sg_failed:
++ kfree(scl);
++ return err;
++}
++
++static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u8 *buffer_start;
++ struct sk_buff **skbh;
++ dma_addr_t addr;
++
++ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
++ DPAA2_ETH_TX_BUF_ALIGN,
++ DPAA2_ETH_TX_BUF_ALIGN);
++
++ /* PTA from egress side is passed as is to the confirmation side so
++ * we need to clear some fields here in order to find consistent values
++ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
++ * field here.
++ */
++ memset(buffer_start + priv->buf_layout.private_data_size, 0, 8);
++
++ /* Store a backpointer to the skb at the beginning of the buffer
++ * (in the private data area) such that we can release it
++ * on Tx confirm
++ */
++ skbh = (struct sk_buff **)buffer_start;
++ *skbh = skb;
++
++ addr = dma_map_single(dev,
++ buffer_start,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ dev_err(dev, "dma_map_single() failed\n");
++ return -EINVAL;
++ }
++
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++
++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
++ DPAA2_FD_CTRL_PTV1;
++
++ return 0;
++}
++
++/* DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
++ * back-pointed to is also freed.
++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
++ * dpaa2_eth_tx().
++ * Optionally, return the frame annotation status word (FAS), which needs
++ * to be checked if we're on the confirmation path.
++ */
++static void dpaa2_eth_free_fd(const struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ u32 *status)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ dma_addr_t fd_addr;
++ struct sk_buff **skbh, *skb;
++ unsigned char *buffer_start;
++ int unmap_size;
++ struct scatterlist *scl;
++ int num_sg, num_dma_bufs;
++ struct dpaa2_eth_swa *bps;
++ bool fd_single;
++ struct dpaa2_fas *fas;
++
++ fd_addr = dpaa2_fd_get_addr(fd);
++ skbh = phys_to_virt(fd_addr);
++ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single);
++
++ if (fd_single) {
++ skb = *skbh;
++ buffer_start = (unsigned char *)skbh;
++ /* Accessing the skb buffer is safe before dma unmap, because
++ * we didn't map the actual skb shell.
++ */
++ dma_unmap_single(dev, fd_addr,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_TO_DEVICE);
++ } else {
++ bps = (struct dpaa2_eth_swa *)skbh;
++ skb = bps->skb;
++ scl = bps->scl;
++ num_sg = bps->num_sg;
++ num_dma_bufs = bps->num_dma_bufs;
++
++ /* Unmap the scatterlist */
++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
++ kfree(scl);
++
++ /* Unmap the SGT buffer */
++ unmap_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
++ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
++ }
++
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ struct skb_shared_hwtstamps shhwtstamps;
++ u64 *ns;
++
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++
++ ns = (u64 *)((void *)skbh +
++ priv->buf_layout.private_data_size +
++ sizeof(struct dpaa2_fas));
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns);
++ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ }
++
++ /* Check the status from the Frame Annotation after we unmap the first
++ * buffer but before we free it.
++ */
++ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) {
++ fas = (struct dpaa2_fas *)
++ ((void *)skbh + priv->buf_layout.private_data_size);
++ *status = le32_to_cpu(fas->status);
++ }
++
++ /* Free SGT buffer kmalloc'ed on tx */
++ if (!fd_single)
++ kfree(skbh);
++
++ /* Move on with skb release */
++ dev_kfree_skb(skb);
++}
++
++static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_fd fd;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_stats *percpu_extras;
++ int err, i;
++ /* TxConf FQ selection primarily based on cpu affinity; this is
++ * non-migratable context, so it's safe to call smp_processor_id().
++ */
++ u16 queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders;
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ /* Setup the FD fields */
++ memset(&fd, 0, sizeof(fd));
++
++ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
++ struct sk_buff *ns;
++
++ dev_info_once(net_dev->dev.parent,
++ "skb headroom too small, must realloc.\n");
++ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
++ if (unlikely(!ns)) {
++ percpu_stats->tx_dropped++;
++ goto err_alloc_headroom;
++ }
++ dev_kfree_skb(skb);
++ skb = ns;
++ }
++
++ /* We'll be holding a back-reference to the skb until Tx Confirmation;
++ * we don't want that overwritten by a concurrent Tx with a cloned skb.
++ */
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ netdev_err(net_dev, "Out of memory for skb_unshare()");
++ /* skb_unshare() has already freed the skb */
++ percpu_stats->tx_dropped++;
++ return NETDEV_TX_OK;
++ }
++
++ if (skb_is_nonlinear(skb)) {
++ err = dpaa2_eth_build_sg_fd(priv, skb, &fd);
++ percpu_extras->tx_sg_frames++;
++ percpu_extras->tx_sg_bytes += skb->len;
++ } else {
++ err = dpaa2_eth_build_single_fd(priv, skb, &fd);
++ }
++
++ if (unlikely(err)) {
++ percpu_stats->tx_dropped++;
++ goto err_build_fd;
++ }
++
++ /* Tracing point */
++ trace_dpaa2_tx_fd(net_dev, &fd);
++
++ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) {
++ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
++ priv->fq[queue_mapping].flowid,
++ &fd);
++ if (err != -EBUSY)
++ break;
++ }
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err < 0)) {
++ netdev_dbg(net_dev, "error enqueueing Tx frame\n");
++ percpu_stats->tx_errors++;
++ /* Clean up everything, including freeing the skb */
++ dpaa2_eth_free_fd(priv, &fd, NULL);
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += skb->len;
++ }
++
++ return NETDEV_TX_OK;
++
++err_build_fd:
++err_alloc_headroom:
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++
++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused)
++{
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_stats *percpu_extras;
++ u32 status = 0;
++
++ /* Tracing point */
++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
++
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ percpu_extras->tx_conf_frames++;
++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
++
++ dpaa2_eth_free_fd(priv, fd, &status);
++
++ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
++ netdev_err(priv->net_dev, "TxConf frame error(s): 0x%08x\n",
++ status & DPAA2_ETH_TXCONF_ERR_MASK);
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ /* Tx-conf logically pertains to the egress path. */
++ percpu_stats->tx_errors++;
++ }
++}
++
++static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++ int err;
++
++ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token,
++ enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++ "dpni_set_l3_chksum_validation() failed\n");
++ return err;
++ }
++
++ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token,
++ enable);
++ if (err) {
++ netdev_err(priv->net_dev,
++ "dpni_set_l4_chksum_validation failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++{
++ struct dpaa2_eth_fq *fq;
++ struct dpni_tx_flow_cfg tx_flow_cfg;
++ int err;
++ int i;
++
++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg));
++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
++ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
++ tx_flow_cfg.l3_chksum_gen = enable;
++ tx_flow_cfg.l4_chksum_gen = enable;
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ if (fq->type != DPAA2_TX_CONF_FQ)
++ continue;
++
++ /* The Tx flowid is kept in the corresponding TxConf FQ. */
++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token,
++ &fq->flowid, &tx_flow_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n");
++ return err;
++ }
++ }
++
++ return 0;
++}
++
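++/* Add buffers to the DPNI pool in batches of 7, the maximum number of
++ * buffers a single QBMan acquire/release command can carry.
++ */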
++static int dpaa2_bp_add_7(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[7];
++ void *buf;
++ dma_addr_t addr;
++ int i;
++
++ for (i = 0; i < 7; i++) {
++ /* Allocate buffer visible to WRIOP + skb shared info +
++ * alignment padding
++ */
++ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
++ if (unlikely(!buf)) {
++ dev_err(dev, "buffer allocation failed\n");
++ goto err_alloc;
++ }
++ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
++
++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ dev_err(dev, "dma_map_single() failed\n");
++ goto err_map;
++ }
++ buf_array[i] = addr;
++
++ /* tracing point */
++ trace_dpaa2_eth_buf_seed(priv->net_dev,
++ buf, DPAA2_ETH_BUF_RAW_SIZE,
++ addr, DPAA2_ETH_RX_BUFFER_SIZE,
++ bpid);
++ }
++
++release_bufs:
++ /* In case the portal is busy, retry until successful.
++ * The buffer release function would only fail if the QBMan portal
++ * was busy, which implies portal contention (i.e. more CPUs than
++ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
++ * there is little we can realistically do, short of giving up -
++ * in which case we'd risk depleting the buffer pool and never again
++ * receiving the Rx interrupt which would kick-start the refill logic.
++ * So just keep retrying, at the risk of being moved to ksoftirqd.
++ */
++ while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
++ cpu_relax();
++ return i;
++
++err_map:
++ put_page(virt_to_head_page(buf));
++err_alloc:
++ if (i)
++ goto release_bufs;
++
++ return 0;
++}
++
++static int dpaa2_dpbp_seed(struct dpaa2_eth_priv *priv, u16 bpid)
++{
++ int i, j;
++ int new_count;
++
++ /* This is the lazy seeding of Rx buffer pools.
++ * dpaa2_bp_add_7() is also used on the Rx hotpath and calls
++ * napi_alloc_frag(). The trouble with that is that it in turn ends up
++ * calling this_cpu_ptr(), which mandates execution in atomic context.
++ * Rather than splitting up the code, do a one-off preempt disable.
++ */
++ preempt_disable();
++ for (j = 0; j < priv->num_channels; j++) {
++ for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += 7) {
++ new_count = dpaa2_bp_add_7(priv, bpid);
++ priv->channel[j]->buf_count += new_count;
++
++ if (new_count < 7) {
++ preempt_enable();
++ goto out_of_memory;
++ }
++ }
++ }
++ preempt_enable();
++
++ return 0;
++
++out_of_memory:
++ return -ENOMEM;
++}
++
++/**
++ * Drain the specified number of buffers from the DPNI's private buffer pool.
++ * @count must not exceed 7
++ */
++static void dpaa2_dpbp_drain_cnt(struct dpaa2_eth_priv *priv, int count)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[7];
++ void *vaddr;
++ int ret, i;
++
++ do {
++ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
++ buf_array, count);
++ if (ret < 0) {
++ pr_err("dpaa2_io_service_acquire() failed\n");
++ return;
++ }
++ for (i = 0; i < ret; i++) {
++ /* Same logic as on regular Rx path */
++ dma_unmap_single(dev, buf_array[i],
++ DPAA2_ETH_RX_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ vaddr = phys_to_virt(buf_array[i]);
++ put_page(virt_to_head_page(vaddr));
++ }
++ } while (ret);
++}
++
++static void __dpaa2_dpbp_free(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ dpaa2_dpbp_drain_cnt(priv, 7);
++ dpaa2_dpbp_drain_cnt(priv, 1);
++
++ for (i = 0; i < priv->num_channels; i++)
++ priv->channel[i]->buf_count = 0;
++}
++
++/* Function is called from softirq context only, so we don't need to guard
++ * the access to percpu count
++ */
++static int dpaa2_dpbp_refill(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
++{
++ int new_count;
++ int err = 0;
++
++ if (unlikely(ch->buf_count < DPAA2_ETH_REFILL_THRESH)) {
++ do {
++ new_count = dpaa2_bp_add_7(priv, bpid);
++ if (unlikely(!new_count)) {
++ /* Out of memory; abort for now, we'll
++ * try later on
++ */
++ break;
++ }
++ ch->buf_count += new_count;
++ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
++
++ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
++ err = -ENOMEM;
++ }
++
++ return err;
++}
++
++static int __dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
++{
++ int err;
++ int dequeues = -1;
++ struct dpaa2_eth_priv *priv = ch->priv;
++
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
++ dequeues++;
++ } while (err == -EBUSY);
++ if (unlikely(err))
++ netdev_err(priv->net_dev, "dpaa2_io_service_pull err %d", err);
++
++ ch->stats.dequeue_portal_busy += dequeues;
++ return err;
++}
++
++static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
++{
++ struct dpaa2_eth_channel *ch;
++ int cleaned = 0, store_cleaned;
++ struct dpaa2_eth_priv *priv;
++ int err;
++
++ ch = container_of(napi, struct dpaa2_eth_channel, napi);
++ priv = ch->priv;
++
++ __dpaa2_eth_pull_channel(ch);
++
++ do {
++ /* Refill pool if appropriate */
++ dpaa2_dpbp_refill(priv, ch, priv->dpbp_attrs.bpid);
++
++ store_cleaned = dpaa2_eth_store_consume(ch);
++ cleaned += store_cleaned;
++
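++ /* Stop if the store came back empty, or if draining another
++ * full store (up to DPAA2_ETH_STORE_SIZE frames) could exceed
++ * the NAPI budget.
++ */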
++ if (store_cleaned == 0 ||
++ cleaned > budget - DPAA2_ETH_STORE_SIZE)
++ break;
++
++ /* Try to dequeue some more */
++ err = __dpaa2_eth_pull_channel(ch);
++ if (unlikely(err))
++ break;
++ } while (1);
++
++ if (cleaned < budget) {
++ napi_complete_done(napi, cleaned);
++ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
++ if (unlikely(err))
++ netdev_err(priv->net_dev,
++ "Notif rearm failed for channel %d\n",
++ ch->ch_id);
++ }
++
++ ch->stats.frames += cleaned;
++
++ return cleaned;
++}
++
++static void dpaa2_eth_napi_enable(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_enable(&ch->napi);
++ }
++}
++
++static void dpaa2_eth_napi_disable(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *ch;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ napi_disable(&ch->napi);
++ }
++}
++
++static int dpaa2_link_state_update(struct dpaa2_eth_priv *priv)
++{
++ struct dpni_link_state state;
++ int err;
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (unlikely(err)) {
++ netdev_err(priv->net_dev,
++ "dpni_get_link_state() failed\n");
++ return err;
++ }
++
++ /* Check link state; speed / duplex changes are not handled yet */
++ if (priv->link_state.up == state.up)
++ return 0;
++
++ priv->link_state = state;
++ if (state.up) {
++ netif_carrier_on(priv->net_dev);
++ netif_tx_start_all_queues(priv->net_dev);
++ } else {
++ netif_tx_stop_all_queues(priv->net_dev);
++ netif_carrier_off(priv->net_dev);
++ }
++
++ netdev_info(priv->net_dev, "Link Event: state %s",
++ state.up ? "up" : "down");
++
++ return 0;
++}
++
++static int dpaa2_eth_open(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ err = dpaa2_dpbp_seed(priv, priv->dpbp_attrs.bpid);
++ if (err) {
++ /* Not much to do; the buffer pool, though not filled up,
++ * may still contain some buffers which would enable us
++ * to limp on.
++ */
++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
++ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
++ }
++
++ /* We'll only start the txqs when the link is actually ready; make sure
++ * we don't race against the link up notification, which may come
++ * immediately after dpni_enable();
++ */
++ netif_tx_stop_all_queues(net_dev);
++ dpaa2_eth_napi_enable(priv);
++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
++ * return true and cause 'ip link show' to report the LOWER_UP flag,
++ * even though the link notification wasn't even received.
++ */
++ netif_carrier_off(net_dev);
++
++ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
++ if (err < 0) {
++ dev_err(net_dev->dev.parent, "dpni_enable() failed\n");
++ goto enable_err;
++ }
++
++ /* If the DPMAC object has already processed the link up interrupt,
++ * we have to learn the link state ourselves.
++ */
++ err = dpaa2_link_state_update(priv);
++ if (err < 0) {
++ dev_err(net_dev->dev.parent, "Can't update link state\n");
++ goto link_state_err;
++ }
++
++ return 0;
++
++link_state_err:
++enable_err:
++ dpaa2_eth_napi_disable(priv);
++ __dpaa2_dpbp_free(priv);
++ return err;
++}
++
++static int dpaa2_eth_stop(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
++ /* Stop Tx and Rx traffic */
++ netif_tx_stop_all_queues(net_dev);
++ netif_carrier_off(net_dev);
++ dpni_disable(priv->mc_io, 0, priv->mc_token);
++
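++ /* Give in-flight frames a chance to be consumed before NAPI
++ * is disabled below.
++ */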
++ msleep(500);
++
++ dpaa2_eth_napi_disable(priv);
++ msleep(100);
++
++ __dpaa2_dpbp_free(priv);
++
++ return 0;
++}
++
++static int dpaa2_eth_init(struct net_device *net_dev)
++{
++ u64 supported = 0;
++ u64 not_supported = 0;
++ const struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 options = priv->dpni_attrs.options;
++
++ /* Capabilities listing */
++ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI;
++
++ if (options & DPNI_OPT_UNICAST_FILTER)
++ supported |= IFF_UNICAST_FLT;
++ else
++ not_supported |= IFF_UNICAST_FLT;
++
++ if (options & DPNI_OPT_MULTICAST_FILTER)
++ supported |= IFF_MULTICAST;
++ else
++ not_supported |= IFF_MULTICAST;
++
++ net_dev->priv_flags |= supported;
++ net_dev->priv_flags &= ~not_supported;
++
++ /* Features */
++ net_dev->features = NETIF_F_RXCSUM |
++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_SG | NETIF_F_HIGHDMA |
++ NETIF_F_LLTX;
++ net_dev->hw_features = net_dev->features;
++
++ return 0;
++}
++
++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ int err;
++
++ err = eth_mac_addr(net_dev, addr);
++ if (err < 0) {
++ dev_err(dev, "eth_mac_addr() failed with error %d\n", err);
++ return err;
++ }
++
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++/* Fill in counters maintained by the GPP driver. These may be different from
++ * the hardware counters obtained by ethtool.
++ */
++static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct rtnl_link_stats64 *percpu_stats;
++ u64 *cpustats;
++ u64 *netstats = (u64 *)stats;
++ int i, j;
++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
++
++ for_each_possible_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ cpustats = (u64 *)percpu_stats;
++ for (j = 0; j < num; j++)
++ netstats[j] += cpustats[j];
++ }
++
++ return stats;
++}
++
++static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) {
++ netdev_err(net_dev, "Invalid MTU %d. Valid range is: 68..%d\n",
++ mtu, DPAA2_ETH_MAX_MTU);
++ return -EINVAL;
++ }
++
++ /* Set the maximum Rx frame length to match the transmit side;
++ * account for L2 headers when computing the MFL
++ */
++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
++ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(net_dev, "dpni_set_mfl() failed\n");
++ return err;
++ }
++
++ net_dev->mtu = mtu;
++ return 0;
++}
++
++/* Convenience macro to make code littered with error checking more readable */
++#define DPAA2_ETH_WARN_IF_ERR(err, netdevp, format, ...) \
++do { \
++ if (err) \
++ netdev_warn(netdevp, format, ##__VA_ARGS__); \
++} while (0)
++
++/* Copy mac unicast addresses from @net_dev to @priv.
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void _dpaa2_eth_hw_add_uc_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
++
++ netdev_for_each_uc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev,
++ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
++ }
++}
++
++/* Copy mac multicast addresses from @net_dev to @priv
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void _dpaa2_eth_hw_add_mc_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
++
++ netdev_for_each_mc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev,
++ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
++ }
++}
++
++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int uc_count = netdev_uc_count(net_dev);
++ int mc_count = netdev_mc_count(net_dev);
++ u8 max_uc = priv->dpni_attrs.max_unicast_filters;
++ u8 max_mc = priv->dpni_attrs.max_multicast_filters;
++ u32 options = priv->dpni_attrs.options;
++ u16 mc_token = priv->mc_token;
++ struct fsl_mc_io *mc_io = priv->mc_io;
++ int err;
++
++ /* Basic sanity checks; these probably indicate a misconfiguration */
++ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0)
++ netdev_info(net_dev,
++ "max_unicast_filters=%d, you must have DPNI_OPT_UNICAST_FILTER in the DPL\n",
++ max_uc);
++ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0)
++ netdev_info(net_dev,
++ "max_multicast_filters=%d, you must have DPNI_OPT_MULTICAST_FILTER in the DPL\n",
++ max_mc);
++
++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
++ if (uc_count > max_uc) {
++ netdev_info(net_dev,
++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count, max_uc);
++ goto force_promisc;
++ }
++ if (mc_count > max_mc) {
++ netdev_info(net_dev,
++ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ mc_count, max_mc);
++ goto force_mc_promisc;
++ }
++
++ /* Adjust promisc settings due to flag combinations */
++ if (net_dev->flags & IFF_PROMISC) {
++ goto force_promisc;
++ } else if (net_dev->flags & IFF_ALLMULTI) {
++ /* First, rebuild unicast filtering table. This should be done
++ * in promisc mode, in order to avoid frame loss while we
++ * progressively add entries to the table.
++ * We don't know whether we had been in promisc already, and
++ * making an MC call to find it is expensive; so set uc promisc
++ * nonetheless.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc\n");
++
++ /* Actual uc table reconstruction. */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc filters\n");
++ _dpaa2_eth_hw_add_uc_addr(net_dev, priv);
++
++ /* Finally, clear uc promisc and set mc promisc as requested. */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc promisc\n");
++ goto force_mc_promisc;
++ }
++
++ /* Neither unicast nor multicast promisc will be on... eventually.
++ * For now, rebuild mac filtering tables while forcing both of them on.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc (%d)\n", err);
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mc promisc (%d)\n", err);
++
++ /* Actual mac filtering tables reconstruction */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mac filters\n");
++ _dpaa2_eth_hw_add_mc_addr(net_dev, priv);
++ _dpaa2_eth_hw_add_uc_addr(net_dev, priv);
++
++ /* Now we can clear both ucast and mcast promisc, without risking
++ * to drop legitimate frames anymore.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear ucast promisc\n");
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mcast promisc\n");
++
++ return;
++
++force_promisc:
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set ucast promisc\n");
++force_mc_promisc:
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mcast promisc\n");
++}
++
++static int dpaa2_eth_set_features(struct net_device *net_dev,
++ netdev_features_t features)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ netdev_features_t changed = features ^ net_dev->features;
++ int err;
++
++ if (changed & NETIF_F_RXCSUM) {
++ bool enable = !!(features & NETIF_F_RXCSUM);
++
++ err = dpaa2_eth_set_rx_csum(priv, enable);
++ if (err)
++ return err;
++ }
++
++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
++ bool enable = !!(features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
++ err = dpaa2_eth_set_tx_csum(priv, enable);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct hwtstamp_config config;
++
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ priv->ts_tx_en = false;
++ break;
++ case HWTSTAMP_TX_ON:
++ priv->ts_tx_en = true;
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
++ priv->ts_rx_en = false;
++ else {
++ priv->ts_rx_en = true;
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ }
++
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
++
++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ if (cmd == SIOCSHWTSTAMP)
++ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
++ else
++ return -EINVAL;
++}
++
++static const struct net_device_ops dpaa2_eth_ops = {
++ .ndo_open = dpaa2_eth_open,
++ .ndo_start_xmit = dpaa2_eth_tx,
++ .ndo_stop = dpaa2_eth_stop,
++ .ndo_init = dpaa2_eth_init,
++ .ndo_set_mac_address = dpaa2_eth_set_addr,
++ .ndo_get_stats64 = dpaa2_eth_get_stats,
++ .ndo_change_mtu = dpaa2_eth_change_mtu,
++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
++ .ndo_set_features = dpaa2_eth_set_features,
++ .ndo_do_ioctl = dpaa2_eth_ioctl,
++};
++
++static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_eth_channel *ch;
++
++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++
++ /* Update NAPI statistics */
++ ch->stats.cdan++;
++
++ napi_schedule_irqoff(&ch->napi);
++}
++
++static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ /* We have one TxConf FQ per Tx flow */
++ for (i = 0; i < priv->dpni_attrs.max_senders; i++) {
++ priv->fq[priv->num_fqs].netdev_priv = priv;
++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
++ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID;
++ }
++
++ /* The number of Rx queues (Rx distribution width) may be different from
++ * the number of cores.
++ * We only support one traffic class for now.
++ */
++ for (i = 0; i < dpaa2_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].netdev_priv = priv;
++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ }
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ /* We have exactly one Rx error queue per DPNI */
++ priv->fq[priv->num_fqs].netdev_priv = priv;
++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
++#endif
++}
++
++static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version)
++{
++ char *name = ls_dev->obj_desc.type;
++ struct device *dev = &ls_dev->dev;
++ u16 supported_version, flib_version;
++
++ if (strcmp(name, "dpni") == 0) {
++ flib_version = DPNI_VER_MAJOR;
++ supported_version = DPAA2_SUPPORTED_DPNI_VERSION;
++ } else if (strcmp(name, "dpbp") == 0) {
++ flib_version = DPBP_VER_MAJOR;
++ supported_version = DPAA2_SUPPORTED_DPBP_VERSION;
++ } else if (strcmp(name, "dpcon") == 0) {
++ flib_version = DPCON_VER_MAJOR;
++ supported_version = DPAA2_SUPPORTED_DPCON_VERSION;
++ } else {
++ dev_err(dev, "invalid object type (%s)\n", name);
++ return -EINVAL;
++ }
++
++ /* Check that the FLIB-defined version matches the one reported by MC */
++ if (mc_version != flib_version) {
++ dev_err(dev,
++ "%s FLIB version mismatch: MC reports %d, we have %d\n",
++ name, mc_version, flib_version);
++ return -EINVAL;
++ }
++
++ /* ... and that we actually support it */
++ if (mc_version < supported_version) {
++ dev_err(dev, "Unsupported %s FLIB version (%d)\n",
++ name, mc_version);
++ return -EINVAL;
++ }
++ dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version);
++
++ return 0;
++}
++
++static struct fsl_mc_device *dpaa2_dpcon_setup(struct dpaa2_eth_priv *priv)
++{
++ struct fsl_mc_device *dpcon;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpcon_attr attrs;
++ int err;
++
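++ /* DPCONs come from the fsl-mc bus allocator pool; running out of
++ * them is not fatal, the caller simply ends up with fewer channels.
++ */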
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
++ FSL_MC_POOL_DPCON, &dpcon);
++ if (err) {
++ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
++ return NULL;
++ }
++
++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ err = check_obj_version(dpcon, attrs.version.major);
++ if (err)
++ goto err_dpcon_ver;
++
++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_enable() failed\n");
++ goto err_enable;
++ }
++
++ return dpcon;
++
++err_enable:
++err_dpcon_ver:
++err_get_attr:
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++err_open:
++ fsl_mc_object_free(dpcon);
++
++ return NULL;
++}
++
++static void dpaa2_dpcon_free(struct dpaa2_eth_priv *priv,
++ struct fsl_mc_device *dpcon)
++{
++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++ fsl_mc_object_free(dpcon);
++}
++
++static struct dpaa2_eth_channel *
++dpaa2_alloc_channel(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_attr attr;
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
++
++ channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
++ if (!channel) {
++ dev_err(dev, "Memory allocation failed\n");
++ return NULL;
++ }
++
++ channel->dpcon = dpaa2_dpcon_setup(priv);
++ if (!channel->dpcon)
++ goto err_setup;
++
++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
++ &attr);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ channel->dpcon_id = attr.id;
++ channel->ch_id = attr.qbman_ch_id;
++ channel->priv = priv;
++
++ return channel;
++
++err_get_attr:
++ dpaa2_dpcon_free(priv, channel->dpcon);
++err_setup:
++ kfree(channel);
++ return NULL;
++}
++
++static void dpaa2_free_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *channel)
++{
++ dpaa2_dpcon_free(priv, channel->dpcon);
++ kfree(channel);
++}
++
++static int dpaa2_dpio_setup(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_notification_cfg dpcon_notif_cfg;
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
++
++ /* Don't allocate more channels than strictly necessary and assign
++ * them to cores starting from the first one available in
++ * cpu_online_mask.
++ * If the number of channels is lower than the number of cores,
++ * there will be no rx/tx conf processing on the last cores in the mask.
++ */
++ cpumask_clear(&priv->dpio_cpumask);
++ for_each_online_cpu(i) {
++ /* Try to allocate a channel */
++ channel = dpaa2_alloc_channel(priv);
++ if (!channel)
++ goto err_alloc_ch;
++
++ priv->channel[priv->num_channels] = channel;
++
++ nctx = &channel->nctx;
++ nctx->is_cdan = 1;
++ nctx->cb = dpaa2_eth_cdan_cb;
++ nctx->id = channel->ch_id;
++ nctx->desired_cpu = i;
++
++ /* Register the new context */
++ err = dpaa2_io_service_register(NULL, nctx);
++ if (err) {
++ dev_info(dev, "No affine DPIO for core %d\n", i);
++ /* This core doesn't have an affine DPIO, but there's
++ * a chance another one does, so keep trying
++ */
++ dpaa2_free_channel(priv, channel);
++ continue;
++ }
++
++ /* Register DPCON notification with MC */
++ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
++ dpcon_notif_cfg.priority = 0;
++ dpcon_notif_cfg.user_ctx = nctx->qman64;
++ err = dpcon_set_notification(priv->mc_io, 0,
++ channel->dpcon->mc_handle,
++ &dpcon_notif_cfg);
++ if (err) {
++ dev_err(dev, "dpcon_set_notification failed()\n");
++ goto err_set_cdan;
++ }
++
++ /* If we managed to allocate a channel and also found an affine
++ * DPIO for this core, add it to the final mask
++ */
++ cpumask_set_cpu(i, &priv->dpio_cpumask);
++ priv->num_channels++;
++
++ if (priv->num_channels == dpaa2_max_channels(priv))
++ break;
++ }
++
++ /* Tx confirmation queues can only be serviced by cpus
++ * with an affine DPIO/channel
++ */
++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++
++ return 0;
++
++err_set_cdan:
++ dpaa2_io_service_deregister(NULL, nctx);
++ dpaa2_free_channel(priv, channel);
++err_alloc_ch:
++ if (cpumask_empty(&priv->dpio_cpumask)) {
++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
++ return -ENODEV;
++ }
++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++
++ return 0;
++}
++
++static void dpaa2_dpio_free(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ /* deregister CDAN notifications and free channels */
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ dpaa2_io_service_deregister(NULL, &ch->nctx);
++ dpaa2_free_channel(priv, ch);
++ }
++}
++
++static struct dpaa2_eth_channel *
++dpaa2_get_channel_by_cpu(struct dpaa2_eth_priv *priv, int cpu)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++)
++ if (priv->channel[i]->nctx.desired_cpu == cpu)
++ return priv->channel[i];
++
++ /* We should never get here. Issue a warning and return
++ * the first channel, because it's still better than nothing
++ */
++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
++
++ return priv->channel[0];
++}
++
++static void dpaa2_set_fq_affinity(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpaa2_eth_fq *fq;
++ int rx_cpu, txconf_cpu;
++ int i;
++
++ /* For each FQ, pick one channel/CPU to deliver frames to.
++ * This may well change at runtime, either through irqbalance or
++ * through direct user intervention.
++ */
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
++ txconf_cpu = cpumask_first(&priv->txconf_cpumask);
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ case DPAA2_RX_ERR_FQ:
++ fq->target_cpu = rx_cpu;
++ cpumask_rr(rx_cpu, &priv->dpio_cpumask);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ fq->target_cpu = txconf_cpu;
++ cpumask_rr(txconf_cpu, &priv->txconf_cpumask);
++ break;
++ default:
++ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
++ }
++ fq->channel = dpaa2_get_channel_by_cpu(priv, fq->target_cpu);
++ }
++}
++
++static int dpaa2_dpbp_setup(struct dpaa2_eth_priv *priv)
++{
++ int err;
++ struct fsl_mc_device *dpbp_dev;
++ struct device *dev = priv->net_dev->dev.parent;
++
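++ /* The DPBP (buffer pool) object backs all of our Rx buffers */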
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
++ &dpbp_dev);
++ if (err) {
++ dev_err(dev, "DPBP device allocation failed\n");
++ return err;
++ }
++
++ priv->dpbp_dev = dpbp_dev;
++
++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
++ &dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_enable() failed\n");
++ goto err_enable;
++ }
++
++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
++ &priv->dpbp_attrs);
++ if (err) {
++ dev_err(dev, "dpbp_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++
++ err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major);
++ if (err)
++ goto err_dpbp_ver;
++
++ return 0;
++
++err_dpbp_ver:
++err_get_attr:
++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_enable:
++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_open:
++ fsl_mc_object_free(dpbp_dev);
++
++ return err;
++}
++
++static void dpaa2_dpbp_free(struct dpaa2_eth_priv *priv)
++{
++ __dpaa2_dpbp_free(priv);
++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ fsl_mc_object_free(priv->dpbp_dev);
++}
++
++static int dpaa2_dpni_setup(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_eth_priv *priv;
++ struct net_device *net_dev;
++ void *dma_mem;
++ int err;
++
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
++
++ priv->dpni_id = ls_dev->obj_desc.id;
++
++ /* Get a handle for the DPNI this interface is associated with */
++ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_open() failed\n");
++ goto err_open;
++ }
++
++ ls_dev->mc_io = priv->mc_io;
++ ls_dev->mc_handle = priv->mc_token;
++
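++ /* The MC writes the DPNI extended config through ext_cfg_iova during
++ * dpni_get_attributes(); map a scratch buffer for it here and parse
++ * it below with dpni_extract_extended_cfg().
++ */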
++ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem) {
++ err = -ENOMEM;
++ goto err_alloc;
++ }
++
++ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem,
++ DPAA2_EXT_CFG_SIZE,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) {
++ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n");
++ goto err_dma_map;
++ }
++
++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
++ &priv->dpni_attrs);
++ if (err) {
++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
++ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova,
++ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE);
++ goto err_get_attr;
++ }
++
++ err = check_obj_version(ls_dev, priv->dpni_attrs.version.major);
++ if (err)
++ goto err_dpni_ver;
++
++ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova,
++ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE);
++
++ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg));
++ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem);
++ if (err) {
++ dev_err(dev, "dpni_extract_extended_cfg() failed\n");
++ goto err_extract;
++ }
++
++ /* Configure our buffers' layout */
++ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
++ priv->buf_layout.pass_parser_result = true;
++ priv->buf_layout.pass_frame_status = true;
++ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
++ /* HW erratum mandates data alignment in multiples of 256 */
++ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
++ /* ...rx, ... */
++ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ &priv->buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_buffer_layout() failed");
++ goto err_buf_layout;
++ }
++ /* ... tx, ... */
++ /* remove Rx-only options */
++ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
++ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT);
++ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ &priv->buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_buffer_layout() failed");
++ goto err_buf_layout;
++ }
++ /* ... tx-confirm. */
++ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ priv->buf_layout.pass_timestamp = 1;
++ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ &priv->buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed");
++ goto err_buf_layout;
++ }
++ /* Now that we've set our tx buffer layout, retrieve the minimum
++ * required tx data offset.
++ */
++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
++ &priv->tx_data_offset);
++ if (err) {
++ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
++ goto err_data_offset;
++ }
++
++ /* Warn if the TX data offset is not a multiple of 64 bytes. */
++ WARN_ON(priv->tx_data_offset % 64);
++
++ /* Accommodate SWA space. */
++ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
++
++ /* Allocate classification rule space */
++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
++ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL);
++ if (!priv->cls_rule) {
++ err = -ENOMEM;
++ goto err_cls_rule;
++ }
++
++ kfree(dma_mem);
++
++ return 0;
++
++err_cls_rule:
++err_data_offset:
++err_buf_layout:
++err_extract:
++err_dpni_ver:
++err_get_attr:
++err_dma_map:
++ kfree(dma_mem);
++err_alloc:
++ dpni_close(priv->mc_io, 0, priv->mc_token);
++err_open:
++ return err;
++}
++
++static void dpaa2_dpni_free(struct dpaa2_eth_priv *priv)
++{
++ int err;
++
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err)
++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
++ err);
++
++ dpni_close(priv->mc_io, 0, priv->mc_token);
++}
++
++static int dpaa2_rx_flow_setup(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue_attr rx_queue_attr;
++ struct dpni_queue_cfg queue_cfg;
++ int err;
++
++ memset(&queue_cfg, 0, sizeof(queue_cfg));
++ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
++ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD;
++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON;
++ queue_cfg.dest_cfg.priority = 1;
++ queue_cfg.user_ctx = (u64)fq;
++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id;
++ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH;
++ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid,
++ &queue_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_flow() failed\n");
++ return err;
++ }
++
++ /* Get the actual FQID that was assigned by MC */
++ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid,
++ &rx_queue_attr);
++ if (err) {
++ dev_err(dev, "dpni_get_rx_flow() failed\n");
++ return err;
++ }
++ fq->fqid = rx_queue_attr.fqid;
++
++ return 0;
++}
++
++static int dpaa2_tx_flow_setup(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_tx_flow_cfg tx_flow_cfg;
++ struct dpni_tx_conf_cfg tx_conf_cfg;
++ struct dpni_tx_conf_attr tx_conf_attr;
++ int err;
++
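++ /* fq->flowid is initially DPNI_NEW_FLOW_ID; the MC allocates a real
++ * flow id and writes it back through the pointer below.
++ */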
++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg));
++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR;
++ tx_flow_cfg.use_common_tx_conf_queue = 0;
++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token,
++ &fq->flowid, &tx_flow_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_flow() failed\n");
++ return err;
++ }
++
++ tx_conf_cfg.errors_only = 0;
++ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX |
++ DPNI_QUEUE_OPT_DEST;
++ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq;
++ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON;
++ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id;
++ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0;
++
++ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid,
++ &tx_conf_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_conf() failed\n");
++ return err;
++ }
++
++ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid,
++ &tx_conf_attr);
++ if (err) {
++ dev_err(dev, "dpni_get_tx_conf() failed\n");
++ return err;
++ }
++
++ fq->fqid = tx_conf_attr.queue_attr.fqid;
++
++ return 0;
++}
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++static int dpaa2_rx_err_setup(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct dpni_queue_attr queue_attr;
++ struct dpni_queue_cfg queue_cfg;
++ int err;
++
++ /* Configure the Rx error queue to generate CDANs,
++ * just like the Rx queues
++ */
++ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON;
++ queue_cfg.dest_cfg.priority = 1;
++ queue_cfg.user_ctx = (u64)fq;
++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id;
++ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n");
++ return err;
++ }
++
++ /* Get the FQID */
++ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_attr);
++ if (err) {
++ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n");
++ return err;
++ }
++ fq->fqid = queue_attr.fqid;
++
++ return 0;
++}
++#endif
++
++static int dpaa2_dpni_bind(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ struct dpni_pools_cfg pools_params;
++ struct dpni_error_cfg err_cfg;
++ int err = 0;
++ int i;
++
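++ /* Tell the DPNI which (single) buffer pool to draw Rx buffers from */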
++ pools_params.num_dpbp = 1;
++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
++ pools_params.pools[0].backup_pool = 0;
++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUFFER_SIZE;
++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
++ if (err) {
++ dev_err(dev, "dpni_set_pools() failed\n");
++ return err;
++ }
++
++ dpaa2_cls_check(net_dev);
++
++ /* Have the interface implicitly distribute traffic based on the
++ * supported header fields
++ */
++ if (dpaa2_eth_hash_enabled(priv)) {
++ err = dpaa2_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
++ if (err)
++ return err;
++ }
++
++ /* Configure handling of error frames */
++ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
++ err_cfg.set_frame_annotation = 1;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
++#else
++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
++#endif
++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
++ &err_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_errors_behavior failed\n");
++ return err;
++ }
++
++ /* Configure Rx and Tx conf queues to generate CDANs */
++ for (i = 0; i < priv->num_fqs; i++) {
++ switch (priv->fq[i].type) {
++ case DPAA2_RX_FQ:
++ err = dpaa2_rx_flow_setup(priv, &priv->fq[i]);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ err = dpaa2_tx_flow_setup(priv, &priv->fq[i]);
++ break;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ case DPAA2_RX_ERR_FQ:
++ err = dpaa2_rx_err_setup(priv, &priv->fq[i]);
++ break;
++#endif
++ default:
++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
++ return -EINVAL;
++ }
++ if (err)
++ return err;
++ }
++
++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid);
++ if (err) {
++ dev_err(dev, "dpni_get_qdid() failed\n");
++ return err;
++ }
++
++ return 0;
++}
++
++static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ priv->channel[i]->store =
++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
++ if (!priv->channel[i]->store) {
++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
++ goto err_ring;
++ }
++ }
++
++ return 0;
++
++err_ring:
++ for (i = 0; i < priv->num_channels; i++) {
++ if (!priv->channel[i]->store)
++ break;
++ dpaa2_io_store_destroy(priv->channel[i]->store);
++ }
++
++ return -ENOMEM;
++}
++
++static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ for (i = 0; i < priv->num_channels; i++)
++ dpaa2_io_store_destroy(priv->channel[i]->store);
++}
++
++static int dpaa2_eth_netdev_init(struct net_device *net_dev)
++{
++ int err;
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u8 mac_addr[ETH_ALEN];
++ u8 bcast_addr[ETH_ALEN];
++
++ net_dev->netdev_ops = &dpaa2_eth_ops;
++
++ /* If the DPL contains all-0 mac_addr, set a random hardware address */
++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err);
++ return err;
++ }
++ if (is_zero_ether_addr(mac_addr)) {
++ /* Fills in net_dev->dev_addr, as required by
++ * register_netdevice()
++ */
++ eth_hw_addr_random(net_dev);
++ /* Make the user aware, without cluttering the boot log */
++ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random\n");
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
++ return err;
++ }
++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
++ * practical purposes, this will be our "permanent" mac address,
++ * at least until the next reboot. This move will also permit
++ * register_netdevice() to properly fill up net_dev->perm_addr.
++ */
++ net_dev->addr_assign_type = NET_ADDR_PERM;
++ } else {
++ /* NET_ADDR_PERM is default, all we have to do is
++ * fill in the device addr.
++ */
++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
++ }
++
++ /* Explicitly add the broadcast address to the MAC filtering table;
++ * the MC won't do that for us.
++ */
++ eth_broadcast_addr(bcast_addr);
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
++ if (err) {
++ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
++ /* Not a fatal error; at least egress traffic still works */
++ }
++
++ /* Reserve enough space to align buffer as per hardware requirement;
++ * NOTE: priv->tx_data_offset MUST be initialized at this point.
++ */
++ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
++
++ /* Our .ndo_init will be called herein */
++ err = register_netdev(net_dev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev() = %d\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++static int dpaa2_poll_link_state(void *arg)
++{
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
++ int err;
++
++ while (!kthread_should_stop()) {
++ err = dpaa2_link_state_update(priv);
++ if (unlikely(err))
++ return err;
++
++ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
++ }
++
++ return 0;
++}
++#else
++static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
++{
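++ /* MC commands can sleep, so defer all work to the threaded handler */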
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
++{
++ u8 irq_index = DPNI_IRQ_INDEX;
++ u32 status, clear = 0;
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
++ struct net_device *net_dev = dev_get_drvdata(dev);
++ int err;
++
++ netdev_dbg(net_dev, "IRQ %d received\n", irq_num);
++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ irq_index, &status);
++ if (unlikely(err)) {
++ netdev_err(net_dev, "Can't get irq status (err %d)", err);
++ clear = 0xffffffff;
++ goto out;
++ }
++
++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
++ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
++ dpaa2_link_state_update(netdev_priv(net_dev));
++ }
++
++out:
++ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ irq_index, clear);
++ return IRQ_HANDLED;
++}
++
++static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
++{
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++ int irq_count = ls_dev->obj_desc.irq_count;
++ u8 irq_index = DPNI_IRQ_INDEX;
++ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED;
++
++ /* The only interrupt supported now is the link state notification. */
++ if (WARN_ON(irq_count != 1))
++ return -EINVAL;
++
++ irq = ls_dev->irqs[0];
++ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
++ dpni_irq0_handler,
++ dpni_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&ls_dev->dev), &ls_dev->dev);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
++ return err;
++ }
++
++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ irq_index, mask);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
++ return err;
++ }
++
++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ irq_index, 1);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
++ return err;
++ }
++
++ return 0;
++}
++#endif
++
++static void dpaa2_eth_napi_add(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
++ NAPI_POLL_WEIGHT);
++ }
++}
++
++static void dpaa2_eth_napi_del(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ netif_napi_del(&ch->napi);
++ }
++}
++
++/* SysFS support */
++
++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ /* No MC API for getting the shaping config. We're stateful. */
++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
++
++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
++}
++
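++/* Expected input: "<rate_limit(Mbps)> <max_burst_size(bytes)>", e.g.
++ * (assuming the interface is named ni0):
++ * echo "1000 16000" > /sys/class/net/ni0/tx_shaping
++ */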
++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ int err, items;
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpni_tx_shaping_cfg scfg;
++
++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
++ if (items != 2) {
++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
++ return -EINVAL;
++ }
++ /* Size restriction as per MC API documentation */
++ if (scfg.max_burst_size > 64000) {
++ pr_err("max_burst_size must be <= 64000, thanks.\n");
++ return -EINVAL;
++ }
++
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_shaping() failed\n");
++ return -EPERM;
++ }
++ /* If successful, save the current configuration for future inquiries */
++ priv->shaping_cfg = scfg;
++
++ return count;
++}
++
++static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++
++ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
++}
++
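++/* Accepts a cpu list, e.g. "echo 1-3 > /sys/class/net/ni0/txconf_cpumask"
++ * (interface name assumed); cpus without an affine DPIO are filtered out.
++ */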
++static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpaa2_eth_fq *fq;
++ bool running = netif_running(priv->net_dev);
++ int i, err;
++
++ err = cpulist_parse(buf, &priv->txconf_cpumask);
++ if (err)
++ return err;
++
++ /* Only accept CPUs that have an affine DPIO */
++ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
++ netdev_info(priv->net_dev,
++ "cpumask must be a subset of 0x%lx\n",
++ *cpumask_bits(&priv->dpio_cpumask));
++ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
++ &priv->txconf_cpumask);
++ }
++
++ /* Rewiring the TxConf FQs requires interface shutdown. */
++ if (running) {
++ err = dpaa2_eth_stop(priv->net_dev);
++ if (err)
++ return -ENODEV;
++ }
++
++ /* Set the new TxConf FQ affinities */
++ dpaa2_set_fq_affinity(priv);
++
++#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
++ * link up notification is received. Give the polling thread enough time
++ * to detect the link state change, or else we'll end up with the
++ * transmission side forever shut down.
++ */
++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
++#endif
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ if (fq->type != DPAA2_TX_CONF_FQ)
++ continue;
++ dpaa2_tx_flow_setup(priv, fq);
++ }
++
++ if (running) {
++ err = dpaa2_eth_open(priv->net_dev);
++ if (err)
++ return -ENODEV;
++ }
++
++ return count;
++}
++
++static struct device_attribute dpaa2_eth_attrs[] = {
++ __ATTR(txconf_cpumask,
++ S_IRUSR | S_IWUSR,
++ dpaa2_eth_show_txconf_cpumask,
++ dpaa2_eth_write_txconf_cpumask),
++
++ __ATTR(tx_shaping,
++ S_IRUSR | S_IWUSR,
++ dpaa2_eth_show_tx_shaping,
++ dpaa2_eth_write_tx_shaping),
++};
++
++void dpaa2_eth_sysfs_init(struct device *dev)
++{
++ int i, err;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
++ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
++ if (err) {
++ dev_err(dev, "ERROR creating sysfs file\n");
++ goto undo;
++ }
++ }
++ return;
++
++undo:
++ while (i > 0)
++ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
++}
++
++void dpaa2_eth_sysfs_remove(struct device *dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
++ device_remove_file(dev, &dpaa2_eth_attrs[i]);
++}
++
++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev = NULL;
++ struct dpaa2_eth_priv *priv = NULL;
++ int err = 0;
++
++ dev = &dpni_dev->dev;
++
++ /* Net device */
++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
++ if (!net_dev) {
++ dev_err(dev, "alloc_etherdev_mq() failed\n");
++ return -ENOMEM;
++ }
++
++ SET_NETDEV_DEV(net_dev, dev);
++ dev_set_drvdata(dev, net_dev);
++
++ priv = netdev_priv(net_dev);
++ priv->net_dev = net_dev;
++ priv->msg_enable = netif_msg_init(debug, -1);
++
++ /* Obtain a MC portal */
++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_portal_alloc;
++ }
++
++#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++ err = fsl_mc_allocate_irqs(dpni_dev);
++ if (err) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ goto err_irqs_alloc;
++ }
++#endif
++
++ /* DPNI initialization */
++ err = dpaa2_dpni_setup(dpni_dev);
++ if (err < 0)
++ goto err_dpni_setup;
++
++ /* DPIO */
++ err = dpaa2_dpio_setup(priv);
++ if (err)
++ goto err_dpio_setup;
++
++ /* FQs */
++ dpaa2_eth_setup_fqs(priv);
++ dpaa2_set_fq_affinity(priv);
++
++ /* DPBP */
++ err = dpaa2_dpbp_setup(priv);
++ if (err)
++ goto err_dpbp_setup;
++
++ /* DPNI binding to DPIO and DPBPs */
++ err = dpaa2_dpni_bind(priv);
++ if (err)
++ goto err_bind;
++
++ dpaa2_eth_napi_add(priv);
++
++ /* Percpu statistics */
++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
++ if (!priv->percpu_stats) {
++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_stats;
++ }
++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
++ if (!priv->percpu_extras) {
++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_extras;
++ }
++
++ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
++ if (!dev_valid_name(net_dev->name)) {
++ dev_warn(&net_dev->dev,
++ "netdevice name \"%s\" cannot be used, reverting to default..\n",
++ net_dev->name);
++ dev_alloc_name(net_dev, "eth%d");
++ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
++ }
++
++ err = dpaa2_eth_netdev_init(net_dev);
++ if (err)
++ goto err_netdev_init;
++
++ /* Configure checksum offload based on current interface flags */
++ err = dpaa2_eth_set_rx_csum(priv,
++ !!(net_dev->features & NETIF_F_RXCSUM));
++ if (err)
++ goto err_csum;
++
++ err = dpaa2_eth_set_tx_csum(priv,
++ !!(net_dev->features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
++ if (err)
++ goto err_csum;
++
++ err = dpaa2_eth_alloc_rings(priv);
++ if (err)
++ goto err_alloc_rings;
++
++ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++
++#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++ priv->poll_thread = kthread_run(dpaa2_poll_link_state, priv,
++ "%s_poll_link", net_dev->name);
++#else
++ err = dpaa2_eth_setup_irqs(dpni_dev);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d setting up interrupts", err);
++ goto err_setup_irqs;
++ }
++#endif
++
++ dpaa2_eth_sysfs_init(&net_dev->dev);
++ dpaa2_dbg_add(priv);
++
++ dev_info(dev, "Probed interface %s\n", net_dev->name);
++ return 0;
++
++#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++err_setup_irqs:
++#endif
++ dpaa2_eth_free_rings(priv);
++err_alloc_rings:
++err_csum:
++ unregister_netdev(net_dev);
++err_netdev_init:
++ free_percpu(priv->percpu_extras);
++err_alloc_percpu_extras:
++ free_percpu(priv->percpu_stats);
++err_alloc_percpu_stats:
++ dpaa2_eth_napi_del(priv);
++err_bind:
++ dpaa2_dpbp_free(priv);
++err_dpbp_setup:
++ dpaa2_dpio_free(priv);
++err_dpio_setup:
++ kfree(priv->cls_rule);
++ dpni_close(priv->mc_io, 0, priv->mc_token);
++err_dpni_setup:
++#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++ fsl_mc_free_irqs(dpni_dev);
++err_irqs_alloc:
++#endif
++ fsl_mc_portal_free(priv->mc_io);
++err_portal_alloc:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++
++ return err;
++}
++
++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev;
++ struct dpaa2_eth_priv *priv;
++
++ dev = &ls_dev->dev;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
++
++ dpaa2_dbg_remove(priv);
++ dpaa2_eth_sysfs_remove(&net_dev->dev);
++
++ unregister_netdev(net_dev);
++ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++
++ dpaa2_dpio_free(priv);
++ dpaa2_eth_free_rings(priv);
++ dpaa2_eth_napi_del(priv);
++ dpaa2_dpbp_free(priv);
++ dpaa2_dpni_free(priv);
++
++ fsl_mc_portal_free(priv->mc_io);
++
++ free_percpu(priv->percpu_stats);
++ free_percpu(priv->percpu_extras);
++
++#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
++ kthread_stop(priv->poll_thread);
++#else
++ fsl_mc_free_irqs(ls_dev);
++#endif
++
++ kfree(priv->cls_rule);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpni",
++ .ver_major = DPNI_VER_MAJOR,
++ .ver_minor = DPNI_VER_MINOR
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_eth_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_eth_probe,
++ .remove = dpaa2_eth_remove,
++ .match_id_table = dpaa2_eth_match_id_table
++};
++
++static int __init dpaa2_eth_driver_init(void)
++{
++ int err;
++
++ dpaa2_eth_dbg_init();
++
++ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++ if (err) {
++ dpaa2_eth_dbg_exit();
++ return err;
++ }
++
++ return 0;
++}
++
++static void __exit dpaa2_eth_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++ dpaa2_eth_dbg_exit();
++}
++
++module_init(dpaa2_eth_driver_init);
++module_exit(dpaa2_eth_driver_exit);
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -0,0 +1,366 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA2_ETH_H
++#define __DPAA2_ETH_H
++
++#include <linux/netdevice.h>
++#include <linux/if_vlan.h>
++#include "../../fsl-mc/include/fsl_dpaa2_io.h"
++#include "../../fsl-mc/include/fsl_dpaa2_fd.h"
++#include "../../fsl-mc/include/dpbp.h"
++#include "../../fsl-mc/include/dpbp-cmd.h"
++#include "../../fsl-mc/include/dpcon.h"
++#include "../../fsl-mc/include/dpcon-cmd.h"
++#include "../../fsl-mc/include/dpmng.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
++
++#include "dpaa2-eth-trace.h"
++#include "dpaa2-eth-debugfs.h"
++
++#define DPAA2_ETH_STORE_SIZE 16
++
++/* Maximum receive frame size is 64K */
++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUFFER_SIZE)
++
++/* Maximum acceptable MTU value. It is derived from the MC-enforced
++ * Max Frame Length (currently 10k).
++ */
++#define DPAA2_ETH_MFL (10 * 1024)
++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
++/* Convert L3 MTU to L2 MFL */
++#define DPAA2_ETH_L2_MAX_FRM(mtu) (mtu + VLAN_ETH_HLEN)
++
++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
++ * frames in the Rx queues (length of the current frame is not
++ * taken into account when making the taildrop decision)
++ */
++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
++
++/* Buffer quota per queue. Must be large enough such that for minimum sized
++ * frames taildrop kicks in before the bpool gets depleted, so we compute
++ * how many 64B frames fit inside the taildrop threshold and add a margin
++ * to accommodate the buffer refill delay.
++ */
++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
++#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
++#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
++
++/* Hardware requires alignment for ingress/egress buffer addresses
++ * and ingress buffer lengths.
++ */
++#define DPAA2_ETH_RX_BUFFER_SIZE 2048
++#define DPAA2_ETH_TX_BUF_ALIGN 64
++#define DPAA2_ETH_RX_BUF_ALIGN 256
++#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
++
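++/* Total allocation size for an Rx buffer: the HW-visible data area, plus
++ * room for the skb_shared_info placed at the buffer end (presumably by
++ * build_skb() in the Rx path), plus worst-case alignment slack.
++ */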
++#define DPAA2_ETH_BUF_RAW_SIZE \
++ (DPAA2_ETH_RX_BUFFER_SIZE + \
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
++ DPAA2_ETH_RX_BUF_ALIGN)
++
++/* PTP nominal frequency 1MHz */
++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000
++
++/* We are accommodating a skb backpointer and some S/G info
++ * in the frame's software annotation. The hardware
++ * options are either 0 or 64, so we choose the latter.
++ */
++#define DPAA2_ETH_SWA_SIZE 64
++
++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
++struct dpaa2_eth_swa {
++ struct sk_buff *skb;
++ struct scatterlist *scl;
++ int num_sg;
++ int num_dma_bufs;
++};
++
++/* Annotation valid bits in FD FRC */
++#define DPAA2_FD_FRC_FASV 0x8000
++#define DPAA2_FD_FRC_FAEADV 0x4000
++#define DPAA2_FD_FRC_FAPRV 0x2000
++#define DPAA2_FD_FRC_FAIADV 0x1000
++#define DPAA2_FD_FRC_FASWOV 0x0800
++#define DPAA2_FD_FRC_FAICFDV 0x0400
++
++/* Annotation bits in FD CTRL */
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
++#define DPAA2_FD_CTRL_PTA 0x00800000
++#define DPAA2_FD_CTRL_PTV1 0x00400000
++
++/* Frame annotation status */
++struct dpaa2_fas {
++ u8 reserved;
++ u8 ppid;
++ __le16 ifpid;
++ __le32 status;
++} __packed;
++
++/* Debug frame, otherwise supposed to be discarded */
++#define DPAA2_ETH_FAS_DISC 0x80000000
++/* MACSEC frame */
++#define DPAA2_ETH_FAS_MS 0x40000000
++#define DPAA2_ETH_FAS_PTP 0x08000000
++/* Ethernet multicast frame */
++#define DPAA2_ETH_FAS_MC 0x04000000
++/* Ethernet broadcast frame */
++#define DPAA2_ETH_FAS_BC 0x02000000
++#define DPAA2_ETH_FAS_KSE 0x00040000
++#define DPAA2_ETH_FAS_EOFHE 0x00020000
++#define DPAA2_ETH_FAS_MNLE 0x00010000
++#define DPAA2_ETH_FAS_TIDE 0x00008000
++#define DPAA2_ETH_FAS_PIEE 0x00004000
++/* Frame length error */
++#define DPAA2_ETH_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_ETH_FAS_FPE 0x00001000
++#define DPAA2_ETH_FAS_PTE 0x00000080
++#define DPAA2_ETH_FAS_ISP 0x00000040
++#define DPAA2_ETH_FAS_PHE 0x00000020
++#define DPAA2_ETH_FAS_BLE 0x00000010
++/* L3 csum validation performed */
++#define DPAA2_ETH_FAS_L3CV 0x00000008
++/* L3 csum error */
++#define DPAA2_ETH_FAS_L3CE 0x00000004
++/* L4 csum validation performed */
++#define DPAA2_ETH_FAS_L4CV 0x00000002
++/* L4 csum error */
++#define DPAA2_ETH_FAS_L4CE 0x00000001
++/* These bits always signal errors */
++#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \
++ DPAA2_ETH_FAS_EOFHE | \
++ DPAA2_ETH_FAS_MNLE | \
++ DPAA2_ETH_FAS_TIDE | \
++ DPAA2_ETH_FAS_PIEE | \
++ DPAA2_ETH_FAS_FLE | \
++ DPAA2_ETH_FAS_FPE | \
++ DPAA2_ETH_FAS_PTE | \
++ DPAA2_ETH_FAS_ISP | \
++ DPAA2_ETH_FAS_PHE | \
++ DPAA2_ETH_FAS_BLE | \
++ DPAA2_ETH_FAS_L3CE | \
++ DPAA2_ETH_FAS_L4CE)
++/* Unsupported features in the ingress */
++#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS
++/* Tx errors */
++#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \
++ DPAA2_ETH_FAS_EOFHE | \
++ DPAA2_ETH_FAS_MNLE | \
++ DPAA2_ETH_FAS_TIDE)
++
++/* Time in milliseconds between link state updates */
++#define DPAA2_ETH_LINK_STATE_REFRESH 1000
++
++/* Driver statistics, other than those in struct rtnl_link_stats64.
++ * These are usually collected per-CPU and aggregated by ethtool.
++ */
++struct dpaa2_eth_stats {
++ __u64 tx_conf_frames;
++ __u64 tx_conf_bytes;
++ __u64 tx_sg_frames;
++ __u64 tx_sg_bytes;
++ __u64 rx_sg_frames;
++ __u64 rx_sg_bytes;
++ /* Enqueues retried due to portal busy */
++ __u64 tx_portal_busy;
++};
++
++/* Per-FQ statistics */
++struct dpaa2_eth_fq_stats {
++ /* Number of frames received on this queue */
++ __u64 frames;
++};
++
++/* Per-channel statistics */
++struct dpaa2_eth_ch_stats {
++ /* Volatile dequeues retried due to portal busy */
++ __u64 dequeue_portal_busy;
++ /* Number of CDANs; useful to estimate avg NAPI len */
++ __u64 cdan;
++ /* Number of frames received on queues from this channel */
++ __u64 frames;
++};
++
++/* Maximum number of Rx queues associated with a DPNI */
++#define DPAA2_ETH_MAX_RX_QUEUES 16
++#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
++ DPAA2_ETH_MAX_TX_QUEUES + \
++ DPAA2_ETH_MAX_RX_ERR_QUEUES)
++
++#define DPAA2_ETH_MAX_DPCONS NR_CPUS
++
++enum dpaa2_eth_fq_type {
++ DPAA2_RX_FQ = 0,
++ DPAA2_TX_CONF_FQ,
++ DPAA2_RX_ERR_FQ
++};
++
++struct dpaa2_eth_priv;
++
++struct dpaa2_eth_fq {
++ u32 fqid;
++ u16 flowid;
++ int target_cpu;
++ struct dpaa2_eth_channel *channel;
++ enum dpaa2_eth_fq_type type;
++
++ void (*consume)(struct dpaa2_eth_priv *,
++ struct dpaa2_eth_channel *,
++ const struct dpaa2_fd *,
++ struct napi_struct *);
++ struct dpaa2_eth_priv *netdev_priv; /* backpointer */
++ struct dpaa2_eth_fq_stats stats;
++};
++
++struct dpaa2_eth_channel {
++ struct dpaa2_io_notification_ctx nctx;
++ struct fsl_mc_device *dpcon;
++ int dpcon_id;
++ int ch_id;
++ int dpio_id;
++ struct napi_struct napi;
++ struct dpaa2_io_store *store;
++ struct dpaa2_eth_priv *priv;
++ int buf_count;
++ struct dpaa2_eth_ch_stats stats;
++};
++
++struct dpaa2_cls_rule {
++ struct ethtool_rx_flow_spec fs;
++ bool in_use;
++};
++
++struct dpaa2_eth_priv {
++ struct net_device *net_dev;
++
++ u8 num_fqs;
++ /* TxConf FQs come first in the array, followed by the Rx FQs */
++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++
++ u8 num_channels;
++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++
++ int dpni_id;
++ struct dpni_attr dpni_attrs;
++ struct dpni_extended_cfg dpni_ext_cfg;
++ /* As far as the MC is concerned, we use one layout for all 3 types
++ * of buffers (Rx, Tx, Tx-Conf).
++ */
++ struct dpni_buffer_layout buf_layout;
++ u16 tx_data_offset;
++
++ struct fsl_mc_device *dpbp_dev;
++ struct dpbp_attr dpbp_attrs;
++
++ u16 tx_qdid;
++ struct fsl_mc_io *mc_io;
++ /* SysFS-controlled affinity mask for TxConf FQs */
++ struct cpumask txconf_cpumask;
++ /* Cores which have an affine DPIO/DPCON.
++ * This is the cpu set on which Rx frames are processed;
++ * Tx confirmation frames are processed on a subset of this,
++ * depending on user settings.
++ */
++ struct cpumask dpio_cpumask;
++
++ /* Standard statistics */
++ struct rtnl_link_stats64 __percpu *percpu_stats;
++ /* Extra stats, in addition to the ones known by the kernel */
++ struct dpaa2_eth_stats __percpu *percpu_extras;
++ u32 msg_enable; /* net_device message level */
++
++ u16 mc_token;
++
++ struct dpni_link_state link_state;
++ struct task_struct *poll_thread;
++
++ /* enabled ethtool hashing bits */
++ u64 rx_hash_fields;
++
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ struct dpaa2_debugfs dbg;
++#endif
++
++ /* array of classification rules */
++ struct dpaa2_cls_rule *cls_rule;
++
++ struct dpni_tx_shaping_cfg shaping_cfg;
++
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++};
++
++/* default Rx hash options, set during probing */
++#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
++ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
++ | RXH_L4_B_2_3)
++
++#define dpaa2_eth_hash_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH)
++
++#define dpaa2_eth_fs_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS)
++
++#define DPAA2_CLASSIFIER_ENTRY_COUNT 16
++
++/* Required by struct dpni_attr::ext_cfg_iova */
++#define DPAA2_EXT_CFG_SIZE 256
++
++extern const struct ethtool_ops dpaa2_ethtool_ops;
++
++int dpaa2_set_hash(struct net_device *net_dev, u64 flags);
++
++static inline int dpaa2_queue_count(struct dpaa2_eth_priv *priv)
++{
++ if (!dpaa2_eth_hash_enabled(priv))
++ return 1;
++
++ return priv->dpni_ext_cfg.tc_cfg[0].max_dist;
++}
++
++static inline int dpaa2_max_channels(struct dpaa2_eth_priv *priv)
++{
++ /* Ideally, we want a number of channels large enough
++ * to accommodate both the Rx distribution size
++ * and the max number of Tx confirmation queues
++ */
++ return max_t(int, dpaa2_queue_count(priv),
++ priv->dpni_attrs.max_senders);
++}
++
++void dpaa2_cls_check(struct net_device *);
++
++#endif /* __DPAA2_ETH_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -0,0 +1,882 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "dpni.h" /* DPNI_LINK_OPT_* */
++#include "dpaa2-eth.h"
++
++/* size of DMA memory used to pass configuration to classifier, in bytes */
++#define DPAA2_CLASSIFIER_DMA_SIZE 256
++
++/* To be kept in sync with 'enum dpni_counter' */
++char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
++ "rx frames",
++ "rx bytes",
++ "rx frames dropped",
++ "rx err frames",
++ "rx mcast frames",
++ "rx mcast bytes",
++ "rx bcast frames",
++ "rx bcast bytes",
++ "tx frames",
++ "tx bytes",
++ "tx err frames",
++};
++
++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
++
++/* To be kept in sync with 'struct dpaa2_eth_stats' */
++char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
++ /* per-cpu stats */
++
++ "tx conf frames",
++ "tx conf bytes",
++ "tx sg frames",
++ "tx sg bytes",
++ "rx sg frames",
++ "rx sg bytes",
++ /* how many times we had to retry the enqueue command */
++ "tx portal busy",
++
++ /* Channel stats */
++
++ /* How many times we had to retry the volatile dequeue command */
++ "portal busy",
++ /* Number of notifications received */
++ "cdan",
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ /* FQ stats */
++ "rx pending frames",
++ "rx pending bytes",
++ "tx conf pending frames",
++ "tx conf pending bytes",
++ "buffer count"
++#endif
++};
++
++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++
++static void dpaa2_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct mc_version mc_ver;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ char fw_version[ETHTOOL_FWVERS_LEN];
++ char version[32];
++ int err;
++
++ err = mc_get_version(priv->mc_io, 0, &mc_ver);
++ if (err) {
++ strlcpy(drvinfo->fw_version, "Error retrieving MC version",
++ sizeof(drvinfo->fw_version));
++ } else {
++ scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d",
++ mc_ver.major, mc_ver.minor, mc_ver.revision);
++ strlcpy(drvinfo->fw_version, fw_version,
++ sizeof(drvinfo->fw_version));
++ }
++
++ scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR,
++ DPNI_VER_MINOR);
++ strlcpy(drvinfo->version, version, sizeof(drvinfo->version));
++
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
++
++static u32 dpaa2_get_msglevel(struct net_device *net_dev)
++{
++ return ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable;
++}
++
++static void dpaa2_set_msglevel(struct net_device *net_dev,
++ u32 msg_enable)
++{
++ ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable =
++ msg_enable;
++}
++
++static int dpaa2_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
++{
++ struct dpni_link_state state = {0};
++ int err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ goto out;
++ }
++
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPNI side - and for that matter there may exist
++ * no DPMAC at all. So for now we just don't report anything
++ * beyond the DPNI attributes.
++ */
++ if (state.options & DPNI_LINK_OPT_AUTONEG)
++ cmd->autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
++ cmd->duplex = DUPLEX_FULL;
++ ethtool_cmd_speed_set(cmd, state.rate);
++
++out:
++ return err;
++}
++
++static int dpaa2_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
++{
++ struct dpni_link_cfg cfg = {0};
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err = 0;
++
++ netdev_dbg(net_dev, "Setting link parameters...");
++
++ /* Due to a temporary firmware limitation, the DPNI must be down
++ * before link settings can be changed, so let the user know.
++ */
++ if (netif_running(net_dev)) {
++ netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
++ return -EACCES;
++ }
++
++ cfg.rate = ethtool_cmd_speed(cmd);
++ if (cmd->autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPNI_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
++ if (cmd->duplex == DUPLEX_HALF)
++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++
++ return err;
++}
++
++static void dpaa2_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
++{
++ u8 *p = data;
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ break;
++ }
++}
++
++static int dpaa2_get_sset_count(struct net_device *net_dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++/* Fill in hardware counters, as returned by the MC firmware */
++static void dpaa2_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ int i; /* Current index in the data array */
++ int j, k, err;
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ u32 fcnt, bcnt;
++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++ u32 buf_cnt;
++#endif
++ u64 cdan = 0;
++ u64 portal_busy = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_stats *extras;
++ struct dpaa2_eth_ch_stats *ch_stats;
++
++ memset(data, 0,
++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
++
++ /* Fill in standard counters, from DPNI statistics */
++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
++ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i,
++ data + i);
++ if (err != 0)
++ netdev_warn(net_dev, "Err %d getting DPNI counter %d",
++ err, i);
++ }
++
++ /* Accumulate per-cpu extra stats */
++ for_each_online_cpu(k) {
++ extras = per_cpu_ptr(priv->percpu_extras, k);
++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
++ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
++ }
++ i += j;
++
++ /* We may be using fewer DPIOs than actual CPUs */
++ for_each_cpu(j, &priv->dpio_cpumask) {
++ ch_stats = &priv->channel[j]->stats;
++ cdan += ch_stats->cdan;
++ portal_busy += ch_stats->dequeue_portal_busy;
++ }
++
++ *(data + i++) = portal_busy;
++ *(data + i++) = cdan;
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ for (j = 0; j < priv->num_fqs; j++) {
++ /* Read FQ instantaneous counts */
++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
++ &fcnt, &bcnt);
++ if (err) {
++ netdev_warn(net_dev, "FQ query error %d", err);
++ return;
++ }
++
++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
++ fcnt_tx_total += fcnt;
++ bcnt_tx_total += bcnt;
++ } else {
++ fcnt_rx_total += fcnt;
++ bcnt_rx_total += bcnt;
++ }
++ }
++ *(data + i++) = fcnt_rx_total;
++ *(data + i++) = bcnt_rx_total;
++ *(data + i++) = fcnt_tx_total;
++ *(data + i++) = bcnt_tx_total;
++
++ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt);
++ if (err) {
++ netdev_warn(net_dev, "Buffer count query error %d\n", err);
++ return;
++ }
++ *(data + i++) = buf_cnt;
++#endif
++}
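++
++/* Resulting layout of the data[] array filled in above (summary, for
++ * reference): indices 0..DPAA2_ETH_NUM_STATS-1 hold the DPNI hardware
++ * counters, followed by the accumulated per-cpu software extras, then
++ * the two channel counters (portal busy, CDAN) and, with
++ * CONFIG_FSL_QBMAN_DEBUG, the rx/tx frame and byte totals plus the
++ * buffer pool count.
++ */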
++
++static const struct dpaa2_hash_fields {
++ u64 rxnfc_field;
++ enum net_prot cls_prot;
++ int cls_field;
++ int size;
++} dpaa2_hash_fields[] = {
++ {
++ /* L2 header */
++ .rxnfc_field = RXH_L2DA,
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_DA,
++ .size = 6,
++ }, {
++ /* VLAN header */
++ .rxnfc_field = RXH_VLAN,
++ .cls_prot = NET_PROT_VLAN,
++ .cls_field = NH_FLD_VLAN_TCI,
++ .size = 2,
++ }, {
++ /* IP header */
++ .rxnfc_field = RXH_IP_SRC,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_SRC,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_IP_DST,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_DST,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_L3_PROTO,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_PROTO,
++ .size = 1,
++ }, {
++ /* Using UDP ports, this is functionally equivalent to raw
++ * byte pairs from L4 header.
++ */
++ .rxnfc_field = RXH_L4_B_0_1,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_SRC,
++ .size = 2,
++ }, {
++ .rxnfc_field = RXH_L4_B_2_3,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_DST,
++ .size = 2,
++ },
++};
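++
++/* Example (derived from the table above): with every RXH_* flag enabled,
++ * the hash key is the concatenation, in table order, of
++ * ETH DA(6) | VLAN TCI(2) | IP SRC(4) | IP DST(4) | IP PROTO(1) |
++ * UDP SPORT(2) | UDP DPORT(2), i.e. 21 bytes in total; disabled fields
++ * are simply skipped, which is what dpaa2_cls_key_off() below computes.
++ */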
++
++static int dpaa2_cls_is_enabled(struct net_device *net_dev, u64 flag)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
++ return !!(priv->rx_hash_fields & flag);
++}
++
++static int dpaa2_cls_key_off(struct net_device *net_dev, u64 flag)
++{
++ int i, off = 0;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
++ if (dpaa2_hash_fields[i].rxnfc_field & flag)
++ return off;
++ if (dpaa2_cls_is_enabled(net_dev,
++ dpaa2_hash_fields[i].rxnfc_field))
++ off += dpaa2_hash_fields[i].size;
++ }
++
++ return -1;
++}
++
++static u8 dpaa2_cls_key_size(struct net_device *net_dev)
++{
++ u8 i, size = 0;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
++ if (!dpaa2_cls_is_enabled(net_dev,
++ dpaa2_hash_fields[i].rxnfc_field))
++ continue;
++ size += dpaa2_hash_fields[i].size;
++ }
++
++ return size;
++}
++
++static u8 dpaa2_cls_max_key_size(struct net_device *net_dev)
++{
++ u8 i, size = 0;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++)
++ size += dpaa2_hash_fields[i].size;
++
++ return size;
++}
++
++void dpaa2_cls_check(struct net_device *net_dev)
++{
++ u8 key_size = dpaa2_cls_max_key_size(net_dev);
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++
++ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS &&
++ priv->dpni_attrs.max_dist_key_size < key_size) {
++ dev_err(&net_dev->dev,
++ "max_dist_key_size = %d, expected %d. Steering is disabled\n",
++ priv->dpni_attrs.max_dist_key_size,
++ key_size);
++ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS;
++ }
++}
++
++/* Set RX hash options
++ * flags is a combination of RXH_ bits
++ */
++int dpaa2_set_hash(struct net_device *net_dev, u64 flags)
++{
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpkg_profile_cfg cls_cfg;
++ struct dpni_rx_tc_dist_cfg dist_cfg;
++ u8 *dma_mem;
++ u64 enabled_flags = 0;
++ int i;
++ int err = 0;
++
++ if (!dpaa2_eth_hash_enabled(priv)) {
++ dev_err(dev, "Hashing support is not enabled\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (flags & ~DPAA2_RXH_SUPPORTED) {
++ /* RXH_DISCARD is not supported */
++ dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n");
++ return -EOPNOTSUPP;
++ }
++
++ memset(&cls_cfg, 0, sizeof(cls_cfg));
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
++ struct dpkg_extract *key =
++ &cls_cfg.extracts[cls_cfg.num_extracts];
++
++ if (!(flags & dpaa2_hash_fields[i].rxnfc_field))
++ continue;
++
++ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
++ dev_err(dev, "error adding key extraction rule, too many rules?\n");
++ return -E2BIG;
++ }
++
++ key->type = DPKG_EXTRACT_FROM_HDR;
++ key->extract.from_hdr.prot =
++ dpaa2_hash_fields[i].cls_prot;
++ key->extract.from_hdr.type = DPKG_FULL_FIELD;
++ key->extract.from_hdr.field =
++ dpaa2_hash_fields[i].cls_field;
++ cls_cfg.num_extracts++;
++
++ enabled_flags |= dpaa2_hash_fields[i].rxnfc_field;
++ }
++
++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
++ if (err) {
++ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
++ kfree(dma_mem);
++ return err;
++ }
++
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
++
++ /* Prepare for setting the rx dist */
++ dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
++ DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
++ dev_err(dev, "DMA mapping failed\n");
++ kfree(dma_mem);
++ return -ENOMEM;
++ }
++
++ dist_cfg.dist_size = dpaa2_queue_count(priv);
++ if (dpaa2_eth_fs_enabled(priv)) {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
++ } else {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ }
++
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
++ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
++ kfree(dma_mem);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
++ return err;
++ }
++
++ priv->rx_hash_fields = enabled_flags;
++
++ return 0;
++}
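++
++/* Example call (illustrative): hashing Rx traffic on the IPv4 address
++ * pair plus the L4 port pair would be requested as
++ *
++ *	err = dpaa2_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST |
++ *			     RXH_L4_B_0_1 | RXH_L4_B_2_3);
++ */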
++
++static int dpaa2_cls_prep_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ void *key)
++{
++ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
++ struct ethhdr *eth_h, *eth_m;
++ struct ethtool_flow_ext *ext_h, *ext_m;
++ const u8 key_size = dpaa2_cls_key_size(net_dev);
++ void *msk = key + key_size;
++
++ memset(key, 0, key_size * 2);
++
++ /* This code is a major mess; it must be cleaned up once the
++ * classification mask issue is fixed and the key format is made static
++ */
++
++ switch (fs->flow_type & 0xff) {
++ case TCP_V4_FLOW:
++ l4ip4_h = &fs->h_u.tcp_ip4_spec;
++ l4ip4_m = &fs->m_u.tcp_ip4_spec;
++ /* TODO: ethertype to match IPv4 and protocol to match TCP */
++ goto l4ip4;
++
++ case UDP_V4_FLOW:
++ l4ip4_h = &fs->h_u.udp_ip4_spec;
++ l4ip4_m = &fs->m_u.udp_ip4_spec;
++ goto l4ip4;
++
++ case SCTP_V4_FLOW:
++ l4ip4_h = &fs->h_u.sctp_ip4_spec;
++ l4ip4_m = &fs->m_u.sctp_ip4_spec;
++
++l4ip4:
++ if (l4ip4_m->tos) {
++ netdev_err(net_dev,
++ "ToS is not supported for IPv4 L4\n");
++ return -EOPNOTSUPP;
++ }
++ if (l4ip4_m->ip4src &&
++ !dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) {
++ netdev_err(net_dev, "IP SRC not supported!\n");
++ return -EOPNOTSUPP;
++ }
++ if (l4ip4_m->ip4dst &&
++ !dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) {
++ netdev_err(net_dev, "IP DST not supported!\n");
++ return -EOPNOTSUPP;
++ }
++ if (l4ip4_m->psrc &&
++ !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
++ netdev_err(net_dev, "PSRC not supported, ignored\n");
++ return -EOPNOTSUPP;
++ }
++ if (l4ip4_m->pdst &&
++ !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
++ netdev_err(net_dev, "PDST not supported, ignored\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) {
++ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_SRC))
++ = l4ip4_h->ip4src;
++ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_SRC))
++ = l4ip4_m->ip4src;
++ }
++ if (dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) {
++ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_DST))
++ = l4ip4_h->ip4dst;
++ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_DST))
++ = l4ip4_m->ip4dst;
++ }
++
++ if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
++ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1))
++ = l4ip4_h->psrc;
++ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1))
++ = l4ip4_m->psrc;
++ }
++
++ if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
++ *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3))
++ = l4ip4_h->pdst;
++ *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3))
++ = l4ip4_m->pdst;
++ }
++ break;
++
++ case ETHER_FLOW:
++ eth_h = &fs->h_u.ether_spec;
++ eth_m = &fs->m_u.ether_spec;
++
++ if (eth_m->h_proto) {
++ netdev_err(net_dev, "Ethertype is not supported!\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (!is_zero_ether_addr(eth_m->h_source)) {
++ netdev_err(net_dev, "ETH SRC is not supported!\n");
++ return -EOPNOTSUPP;
++ }
++
++ if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) {
++ ether_addr_copy(key
++ + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ eth_h->h_dest);
++ ether_addr_copy(msk
++ + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ eth_m->h_dest);
++ } else {
++ if (!is_zero_ether_addr(eth_m->h_dest)) {
++ netdev_err(net_dev,
++ "ETH DST is not supported!\n");
++ return -EOPNOTSUPP;
++ }
++ }
++ break;
++
++ default:
++ /* TODO: IP user flow, AH, ESP */
++ return -EOPNOTSUPP;
++ }
++
++ if (fs->flow_type & FLOW_EXT) {
++ /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */
++ return -EOPNOTSUPP;
++ }
++
++ if (fs->flow_type & FLOW_MAC_EXT) {
++ ext_h = &fs->h_ext;
++ ext_m = &fs->m_ext;
++
++ if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) {
++ ether_addr_copy(key
++ + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ ext_h->h_dest);
++ ether_addr_copy(msk
++ + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ ext_m->h_dest);
++ } else {
++ if (!is_zero_ether_addr(ext_m->h_dest)) {
++ netdev_err(net_dev,
++ "ETH DST is not supported!\n");
++ return -EOPNOTSUPP;
++ }
++ }
++ }
++ return 0;
++}
++
++static int dpaa2_do_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT;
++ struct dpni_rule_cfg rule_cfg;
++ void *dma_mem;
++ int err = 0;
++
++ if (!dpaa2_eth_fs_enabled(priv)) {
++ netdev_err(net_dev, "dev does not support steering!\n");
++ /* dev doesn't support steering */
++ return -EOPNOTSUPP;
++ }
++
++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
++ fs->ring_cookie >= dpaa2_queue_count(priv)) ||
++ fs->location >= rule_cnt)
++ return -EINVAL;
++
++ memset(&rule_cfg, 0, sizeof(rule_cfg));
++ rule_cfg.key_size = dpaa2_cls_key_size(net_dev);
++
++ /* allocate twice the key size, for the actual key and for mask */
++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = dpaa2_cls_prep_rule(net_dev, fs, dma_mem);
++ if (err)
++ goto err_free_mem;
++
++ if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) {
++ int i;
++ u8 *mask = dma_mem + rule_cfg.key_size;
++
++ /* check that nothing is masked out, otherwise it won't work;
++ * do this before mapping the buffer, so error paths need not
++ * unmap it
++ */
++ for (i = 0; i < rule_cfg.key_size; i++) {
++ if (mask[i] == 0xff)
++ continue;
++ netdev_err(net_dev, "dev does not support masking!\n");
++ err = -EOPNOTSUPP;
++ goto err_free_mem;
++ }
++ }
++
++ rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem,
++ rule_cfg.key_size * 2,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(net_dev->dev.parent, rule_cfg.key_iova)) {
++ netdev_err(net_dev, "DMA mapping failed\n");
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ if (priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)
++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++ else
++ rule_cfg.mask_iova = 0;
++
++ /* No way to control rule order in firmware */
++ if (add)
++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
++ &rule_cfg, (u16)fs->ring_cookie);
++ else
++ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
++ &rule_cfg);
++
++ dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova,
++ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++ /* cls_rule[] bookkeeping is handled by dpaa2_add_cls()/dpaa2_del_cls() */
++ if (err)
++ netdev_err(net_dev, "dpaa2_do_cls() error %d\n", err);
++
++err_free_mem:
++ kfree(dma_mem);
++
++ return err;
++}
++
++static int dpaa2_add_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ err = dpaa2_do_cls(net_dev, fs, true);
++ if (err)
++ return err;
++
++ priv->cls_rule[fs->location].in_use = true;
++ priv->cls_rule[fs->location].fs = *fs;
++
++ return 0;
++}
++
++static int dpaa2_del_cls(struct net_device *net_dev, int location)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
++
++ err = dpaa2_do_cls(net_dev, &priv->cls_rule[location].fs, false);
++ if (err)
++ return err;
++
++ priv->cls_rule[location].in_use = false;
++
++ return 0;
++}
++
++static void dpaa2_clear_cls(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int i, err;
++
++ for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) {
++ if (!priv->cls_rule[i].in_use)
++ continue;
++
++ err = dpaa2_del_cls(net_dev, i);
++ if (err)
++ netdev_warn(net_dev,
++ "err trying to delete classification entry %d\n",
++ i);
++ }
++}
++
++static int dpaa2_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
++{
++ int err = 0;
++
++ switch (rxnfc->cmd) {
++ case ETHTOOL_SRXFH:
++ /* first of all, clear ALL classification rules, since changing
++ * the key composition will break them anyway
++ */
++ dpaa2_clear_cls(net_dev);
++ /* we purposely ignore cmd->flow_type for now, because the
++ * classifier only supports a single set of fields for all
++ * protocols
++ */
++ err = dpaa2_set_hash(net_dev, rxnfc->data);
++ break;
++ case ETHTOOL_SRXCLSRLINS:
++ err = dpaa2_add_cls(net_dev, &rxnfc->fs);
++ break;
++
++ case ETHTOOL_SRXCLSRLDEL:
++ err = dpaa2_del_cls(net_dev, rxnfc->fs.location);
++ break;
++
++ default:
++ err = -EOPNOTSUPP;
++ }
++
++ return err;
++}
++
++static int dpaa2_get_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT;
++ int i, j;
++
++ switch (rxnfc->cmd) {
++ case ETHTOOL_GRXFH:
++ /* we purposely ignore cmd->flow_type for now, because the
++ * classifier only supports a single set of fields for all
++ * protocols
++ */
++ rxnfc->data = priv->rx_hash_fields;
++ break;
++
++ case ETHTOOL_GRXRINGS:
++ rxnfc->data = dpaa2_queue_count(priv);
++ break;
++
++ case ETHTOOL_GRXCLSRLCNT:
++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
++ if (priv->cls_rule[i].in_use)
++ rxnfc->rule_cnt++;
++ rxnfc->data = rule_cnt;
++ break;
++
++ case ETHTOOL_GRXCLSRULE:
++ if (!priv->cls_rule[rxnfc->fs.location].in_use)
++ return -EINVAL;
++
++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
++ break;
++
++ case ETHTOOL_GRXCLSRLALL:
++ for (i = 0, j = 0; i < rule_cnt; i++) {
++ if (!priv->cls_rule[i].in_use)
++ continue;
++ if (j == rxnfc->rule_cnt)
++ return -EMSGSIZE;
++ rule_locs[j++] = i;
++ }
++ rxnfc->rule_cnt = j;
++ rxnfc->data = rule_cnt;
++ break;
++
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
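++
++/* Usage note (sketch, not driver code): user space reads the rule table
++ * in two steps - ETHTOOL_GRXCLSRLCNT first, to learn rule_cnt, then
++ * ETHTOOL_GRXCLSRLALL with a rule_locs[] array at least that large; the
++ * -EMSGSIZE return above covers a table that grew between the two calls.
++ */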
++
++const struct ethtool_ops dpaa2_ethtool_ops = {
++ .get_drvinfo = dpaa2_get_drvinfo,
++ .get_msglevel = dpaa2_get_msglevel,
++ .set_msglevel = dpaa2_set_msglevel,
++ .get_link = ethtool_op_get_link,
++ .get_settings = dpaa2_get_settings,
++ .set_settings = dpaa2_set_settings,
++ .get_sset_count = dpaa2_get_sset_count,
++ .get_ethtool_stats = dpaa2_get_ethtool_stats,
++ .get_strings = dpaa2_get_strings,
++ .get_rxnfc = dpaa2_get_rxnfc,
++ .set_rxnfc = dpaa2_set_rxnfc,
++};
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -0,0 +1,175 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
++
++#include <linux/types.h>
++#include "../../fsl-mc/include/net.h"
++
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
++ */
++
++/** Key Generator properties */
++
++/**
++ * Number of masks per key extraction
++ */
++#define DPKG_NUM_OF_MASKS 4
++/**
++ * Number of extractions per key profile
++ */
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
++
++/**
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
++ */
++enum dpkg_extract_from_hdr_type {
++ DPKG_FROM_HDR = 0,
++ DPKG_FROM_FIELD = 1,
++ DPKG_FULL_FIELD = 2
++};
++
++/**
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g. can be used to extract header existence;
++ * please refer to 'Parse Result definition' section in the parser BG
++ */
++enum dpkg_extract_type {
++ DPKG_EXTRACT_FROM_HDR = 0,
++ DPKG_EXTRACT_FROM_DATA = 1,
++ DPKG_EXTRACT_FROM_PARSE = 3
++};
++
++/**
++ * struct dpkg_mask - A structure for defining a single extraction mask
++ * @mask: Byte mask for the extracted content
++ * @offset: Offset within the extracted content
++ */
++struct dpkg_mask {
++ uint8_t mask;
++ uint8_t offset;
++};
++
++/**
++ * struct dpkg_extract - A structure for defining a single extraction
++ * @type: Determines how the union below is interpreted:
++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
++ * @extract: Selects extraction method
++ * @num_of_byte_masks: Defines the number of valid entries in the array below;
++ * This is also the number of bytes to be used as masks
++ * @masks: Masks parameters
++ */
++struct dpkg_extract {
++ enum dpkg_extract_type type;
++ /**
++ * union extract - Selects extraction method
++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
++ */
++ union {
++ /**
++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @prot: Any of the supported headers
++ * @type: Defines the type of header extraction:
++ * DPKG_FROM_HDR: use size & offset below;
++ * DPKG_FROM_FIELD: use field, size and offset below;
++ * DPKG_FULL_FIELD: use field below
++ * @field: One of the supported fields (NH_FLD_)
++ *
++ * @size: Size in bytes
++ * @offset: Byte offset
++ * @hdr_index: Clear for cases not listed below;
++ * Used for protocols that may have more than a single
++ * header, 0 indicates an outer header;
++ * Supported protocols (possible values):
++ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
++ * NET_PROT_IP (0, HDR_INDEX_LAST);
++ * NET_PROT_IPv4 (0, HDR_INDEX_LAST);
++ * NET_PROT_IPv6 (0, HDR_INDEX_LAST);
++ */
++
++ struct {
++ enum net_prot prot;
++ enum dpkg_extract_from_hdr_type type;
++ uint32_t field;
++ uint8_t size;
++ uint8_t offset;
++ uint8_t hdr_index;
++ } from_hdr;
++ /**
++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ uint8_t size;
++ uint8_t offset;
++ } from_data;
++
++ /**
++ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ uint8_t size;
++ uint8_t offset;
++ } from_parse;
++ } extract;
++
++ uint8_t num_of_byte_masks;
++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
++};
++
++/**
++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
++ * profile (rule)
++ * @num_extracts: Defines the number of valid entries in the array below
++ * @extracts: Array of required extractions
++ */
++struct dpkg_profile_cfg {
++ uint8_t num_extracts;
++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
++
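++/* Example (illustrative only): a key generation profile extracting the
++ * full IPv4 source address and the full UDP destination port could be
++ * built as follows:
++ *
++ *	struct dpkg_profile_cfg cfg = { 0 };
++ *
++ *	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
++ *	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
++ *	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
++ *	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
++ *	cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
++ *	cfg.extracts[1].extract.from_hdr.prot = NET_PROT_UDP;
++ *	cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
++ *	cfg.extracts[1].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
++ *	cfg.num_extracts = 2;
++ */
++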
++#endif /* __FSL_DPKG_H_ */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+@@ -0,0 +1,1058 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPNI_CMD_H
++#define _FSL_DPNI_CMD_H
++
++/* DPNI Version */
++#define DPNI_VER_MAJOR 6
++#define DPNI_VER_MINOR 0
++
++/* Command IDs */
++#define DPNI_CMDID_OPEN 0x801
++#define DPNI_CMDID_CLOSE 0x800
++#define DPNI_CMDID_CREATE 0x901
++#define DPNI_CMDID_DESTROY 0x900
++
++#define DPNI_CMDID_ENABLE 0x002
++#define DPNI_CMDID_DISABLE 0x003
++#define DPNI_CMDID_GET_ATTR 0x004
++#define DPNI_CMDID_RESET 0x005
++#define DPNI_CMDID_IS_ENABLED 0x006
++
++#define DPNI_CMDID_SET_IRQ 0x010
++#define DPNI_CMDID_GET_IRQ 0x011
++#define DPNI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPNI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPNI_CMDID_SET_IRQ_MASK 0x014
++#define DPNI_CMDID_GET_IRQ_MASK 0x015
++#define DPNI_CMDID_GET_IRQ_STATUS 0x016
++#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPNI_CMDID_SET_POOLS 0x200
++#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201
++#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202
++#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203
++#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204
++#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205
++#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206
++#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207
++#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208
++#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209
++#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A
++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B
++#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C
++
++#define DPNI_CMDID_GET_QDID 0x210
++#define DPNI_CMDID_GET_SP_INFO 0x211
++#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212
++#define DPNI_CMDID_GET_COUNTER 0x213
++#define DPNI_CMDID_SET_COUNTER 0x214
++#define DPNI_CMDID_GET_LINK_STATE 0x215
++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216
++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217
++#define DPNI_CMDID_SET_MTU 0x218
++#define DPNI_CMDID_GET_MTU 0x219
++#define DPNI_CMDID_SET_LINK_CFG 0x21A
++#define DPNI_CMDID_SET_TX_SHAPING 0x21B
++
++#define DPNI_CMDID_SET_MCAST_PROMISC 0x220
++#define DPNI_CMDID_GET_MCAST_PROMISC 0x221
++#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222
++#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223
++#define DPNI_CMDID_SET_PRIM_MAC 0x224
++#define DPNI_CMDID_GET_PRIM_MAC 0x225
++#define DPNI_CMDID_ADD_MAC_ADDR 0x226
++#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227
++#define DPNI_CMDID_CLR_MAC_FILTERS 0x228
++
++#define DPNI_CMDID_SET_VLAN_FILTERS 0x230
++#define DPNI_CMDID_ADD_VLAN_ID 0x231
++#define DPNI_CMDID_REMOVE_VLAN_ID 0x232
++#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233
++
++#define DPNI_CMDID_SET_RX_TC_DIST 0x235
++#define DPNI_CMDID_SET_TX_FLOW 0x236
++#define DPNI_CMDID_GET_TX_FLOW 0x237
++#define DPNI_CMDID_SET_RX_FLOW 0x238
++#define DPNI_CMDID_GET_RX_FLOW 0x239
++#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A
++#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B
++
++#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E
++#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F
++
++#define DPNI_CMDID_SET_QOS_TBL 0x240
++#define DPNI_CMDID_ADD_QOS_ENT 0x241
++#define DPNI_CMDID_REMOVE_QOS_ENT 0x242
++#define DPNI_CMDID_CLR_QOS_TBL 0x243
++#define DPNI_CMDID_ADD_FS_ENT 0x244
++#define DPNI_CMDID_REMOVE_FS_ENT 0x245
++#define DPNI_CMDID_CLR_FS_ENT 0x246
++#define DPNI_CMDID_SET_VLAN_INSERTION 0x247
++#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248
++#define DPNI_CMDID_SET_IPR 0x249
++#define DPNI_CMDID_SET_IPF 0x24A
++
++#define DPNI_CMDID_SET_TX_SELECTION 0x250
++#define DPNI_CMDID_GET_RX_TC_POLICING 0x251
++#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252
++#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253
++#define DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254
++#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255
++#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256
++#define DPNI_CMDID_SET_TX_CONF 0x257
++#define DPNI_CMDID_GET_TX_CONF 0x258
++#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259
++#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A
++#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B
++#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_OPEN(cmd, dpni_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
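++
++/* The MC_CMD_OP(cmd, param, offset, width, type, arg_name) macros used
++ * throughout this file pack a field into bits [offset, offset + width)
++ * of 64-bit command parameter word 'param'. Conceptually (a sketch of
++ * the encoding, not the actual fsl-mc implementation):
++ *
++ *	cmd.params[param] |=
++ *		((uint64_t)arg & ((1ULL << width) - 1)) << offset;
++ *
++ * MC_RSP_OP and MC_EXT_OP do the matching extraction from response and
++ * extension buffers, while MC_PREP_OP packs into an extension buffer.
++ */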
++
++#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
++ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
++ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
++ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
++ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
++ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
++ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
++ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
++ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
++ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
++ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
++ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv4); \
++ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv6); \
++ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \
++ cfg->ipr_cfg.max_reass_frm_size); \
++ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv4); \
++ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv6); \
++} while (0)
++
++#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
++ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
++ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
++ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
++ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
++ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
++ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
++ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
++ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
++ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
++ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
++ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv4); \
++ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv6); \
++ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \
++ cfg->ipr_cfg.max_reass_frm_size); \
++ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv4); \
++ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv6); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \
++ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \
++ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \
++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \
++ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \
++ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \
++ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \
++ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \
++ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \
++ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \
++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_POOLS(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_ATTR(cmd, attr) \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \
++ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \
++ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \
++ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\
++ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \
++ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \
++ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \
++ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \
++ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \
++ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \
++ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
++ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_QDID(cmd, qdid) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_COUNTER(cmd, counter) \
++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_COUNTER(cmd, value) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MTU(cmd, mtu) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MTU(cmd, mtu) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
++ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[0].mode); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[1].mode); \
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[2].mode); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
++ MC_CMD_OP(cmd, 1, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[3].mode); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[4].mode); \
++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[5].mode); \
++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[6].mode); \
++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[7].mode); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
++ cfg->fs_cfg.miss_action); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\
++ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\
++ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\
++ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\
++ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.frame_data_size);\
++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.flow_context_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
++ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ attr->flc_cfg.frame_data_size);\
++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ attr->flc_cfg.flow_context_size);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.frame_data_size);\
++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.flow_context_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ attr->flc_cfg.frame_data_size);\
++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ attr->flc_cfg.flow_context_size);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, revoke)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \
++ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IPR(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IPF(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
++ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
++ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_PREP_EARLY_DROP(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
++ MC_PREP_OP(ext, 0, 2, 2, \
++ enum dpni_congestion_unit, cfg->units); \
++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
++ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
++ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_EXT_EARLY_DROP(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
++ MC_EXT_OP(ext, 0, 2, 2, \
++ enum dpni_congestion_unit, cfg->units); \
++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
++ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
++ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
++
++#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
++
++#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \
++ cfg->queue_cfg.dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \
++ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \
++ cfg->queue_cfg.tail_drop_threshold); \
++ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
++ cfg->queue_cfg.flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
++ cfg->queue_cfg.flc_cfg.frame_data_size); \
++ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
++ cfg->queue_cfg.flc_cfg.flow_context_size); \
++ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \
++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \
++ cfg->queue_cfg.flc_cfg.flow_context); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++#define DPNI_RSP_GET_TX_CONF(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \
++ attr->queue_attr.dest_cfg.priority); \
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \
++ attr->queue_attr.dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \
++ MC_RSP_OP(cmd, 0, 46, 1, int, \
++ attr->queue_attr.order_preservation_en); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \
++ attr->queue_attr.tail_drop_threshold); \
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \
++ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
++ attr->queue_attr.flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
++ attr->queue_attr.flc_cfg.frame_data_size); \
++ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
++ attr->queue_attr.flc_cfg.flow_context_size); \
++ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \
++ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \
++ attr->queue_attr.flc_cfg.flow_context); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#endif /* _FSL_DPNI_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+@@ -0,0 +1,1907 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
++
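++/* Serializes a dpkg_profile_cfg into the binary layout expected by the MC
++ * firmware. Fields are OR-ed into place, so the caller must pass a zeroed
++ * key_cfg_buf; the resulting buffer is then referenced by IOVA in commands
++ * such as DPNI_CMD_SET_QOS_TABLE (key_cfg_iova).
++ */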
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ uint8_t *key_cfg_buf)
++{
++ int i, j;
++ int offset = 0;
++ int param = 1;
++ uint64_t *params = (uint64_t *)key_cfg_buf;
++
++ if (!key_cfg_buf || !cfg)
++ return -EINVAL;
++
++ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
++
++ params[0] |= mc_enc(0, 8, cfg->num_extracts);
++ params[0] = cpu_to_le64(params[0]);
++
++ for (i = 0; i < cfg->num_extracts; i++) {
++ switch (cfg->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ params[param] |= mc_enc(0, 8,
++ cfg->extracts[i].extract.from_hdr.prot);
++ params[param] |= mc_enc(8, 4,
++ cfg->extracts[i].extract.from_hdr.type);
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.from_hdr.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_hdr.offset);
++ params[param] |= mc_enc(32, 32,
++ cfg->extracts[i].extract.
++ from_hdr.field);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ params[param] |= mc_enc(0, 8,
++ cfg->extracts[i].extract.
++ from_hdr.hdr_index);
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.
++ from_data.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_data.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.
++ from_parse.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_parse.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ default:
++ return -EINVAL;
++ }
++ params[param] |= mc_enc(
++ 24, 8, cfg->extracts[i].num_of_byte_masks);
++ params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ for (offset = 0, j = 0;
++ j < DPKG_NUM_OF_MASKS;
++ offset += 16, j++) {
++ params[param] |= mc_enc(
++ (offset), 8, cfg->extracts[i].masks[j].mask);
++ params[param] |= mc_enc(
++ (offset + 8), 8,
++ cfg->extracts[i].masks[j].offset);
++ }
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ }
++ return 0;
++}
++
++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
++ uint8_t *ext_cfg_buf)
++{
++ uint64_t *ext_params = (uint64_t *)ext_cfg_buf;
++
++ DPNI_PREP_EXTENDED_CFG(ext_params, cfg);
++
++ return 0;
++}
++
++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
++ const uint8_t *ext_cfg_buf)
++{
++ uint64_t *ext_params = (uint64_t *)ext_cfg_buf;
++
++ DPNI_EXT_EXTENDED_CFG(ext_params, cfg);
++
++ return 0;
++}
++
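++/* The wrappers below all follow the same pattern: encode a command header
++ * with the command ID, flags and (where applicable) the object token, pack
++ * any input parameters with a DPNI_CMD_* macro, send the command through the
++ * MC portal, and unpack outputs with the matching DPNI_RSP_* macro.
++ */
++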
++int dpni_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpni_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPNI_CMD_OPEN(cmd, dpni_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpni_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpni_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPNI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpni_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_pools_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_POOLS(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
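++/* A minimal control-flow sketch (illustration only, not part of the driver;
++ * error handling trimmed, and mc_io/dpni_id are assumed to come from the
++ * caller's fsl-mc bus setup):
++ *
++ * uint16_t token;
++ * int err = dpni_open(mc_io, 0, dpni_id, &token);
++ * if (!err)
++ * err = dpni_enable(mc_io, 0, token);
++ * ...
++ * dpni_disable(mc_io, 0, token);
++ * dpni_close(mc_io, 0, token);
++ */
++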
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpni_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpni_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpni_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_ATTR(cmd, attr);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_error_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *qdid)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_QDID(cmd, *qdid);
++
++ return 0;
++}
++
++int dpni_get_sp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_sp_info *sp_info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_SP_INFO(cmd, sp_info);
++
++ return 0;
++}
++
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *data_offset)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset);
++
++ return 0;
++}
++
++int dpni_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t *value)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_COUNTER(cmd, counter);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_COUNTER(cmd, *value);
++
++ return 0;
++}
++
++int dpni_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t value)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_COUNTER(cmd, counter, value);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
++
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_shaping_cfg *tx_shaper)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
++
++ return 0;
++}
++
++int dpni_set_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t mtu)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MTU(cmd, mtu);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *mtu)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MTU(cmd, *mtu);
++
++ return 0;
++}
++
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
++
++ return 0;
++}
++
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int unicast,
++ int multicast)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_VLAN_FILTERS(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_selection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_SELECTION(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *flow_id,
++ const struct dpni_tx_flow_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id);
++
++ return 0;
++}
++
++int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_flow_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_FLOW(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_FLOW(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ const struct dpni_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ struct dpni_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_FLOW(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int revoke)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_qos_tbl_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_QOS_TABLE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg,
++ uint8_t tc_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg,
++ uint16_t flow_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION,
++ cmd_flags, token);
++ DPNI_CMD_SET_VLAN_INSERTION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL,
++ cmd_flags, token);
++ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_ipr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IPR(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_ipf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IPF(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_policing_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_rx_tc_policing_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg);
++
++ return 0;
++}
++
++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf)
++{
++ uint64_t *ext_params = (uint64_t *)early_drop_buf;
++
++ DPNI_PREP_EARLY_DROP(ext_params, cfg);
++}
++
++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
++ const uint8_t *early_drop_buf)
++{
++ uint64_t *ext_params = (uint64_t *)early_drop_buf;
++
++ DPNI_EXT_EARLY_DROP(ext_params, cfg);
++}
++
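++/* The prepare/extract pair above converts between dpni_early_drop_cfg and
++ * the DMA-able buffer layout defined by DPNI_PREP/EXT_EARLY_DROP; that
++ * buffer is handed to the MC by IOVA through the dpni_set/get_*_tc_early_drop()
++ * calls below.
++ */
++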
++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
++
++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
++
++int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_tx_conf_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_conf_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_CONF(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_CONF(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+@@ -0,0 +1,2581 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
++
++#include "dpkg.h"
++
++struct fsl_mc_io;
++
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
++
++/** General DPNI macros */
++
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC 8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP 8
++/**
++ * Maximum number of storage-profiles per DPNI
++ */
++#define DPNI_MAX_SP 2
++
++/**
++ * All traffic classes considered; see dpni_set_rx_flow()
++ */
++#define DPNI_ALL_TCS (uint8_t)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_rx_flow()
++ */
++#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
++/**
++ * Generate new flow ID; see dpni_set_tx_flow()
++ */
++#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
++/**
++ * Use for common tx-conf queue; see dpni_set_tx_conf_<x>()
++ */
++#define DPNI_COMMON_TX_CONF (uint16_t)(-1)
++
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpni_id,
++ uint16_t *token);
++
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
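++
++/*
++ * Editorial usage sketch (not part of the original NXP sources): a minimal
++ * open/use/close control session against an already-created DPNI. It assumes
++ * a valid MC portal in 'mc_io' and that 'dpni_id' names an existing object;
++ * cmd_flags of 0 selects default command behaviour. The
++ * dpni_example_session() name is hypothetical.
++ */
++static inline int dpni_example_session(struct fsl_mc_io *mc_io, int dpni_id)
++{
++	uint16_t token;
++	int err;
++
++	err = dpni_open(mc_io, 0, dpni_id, &token);
++	if (err)
++		return err;
++
++	/* ... issue runtime dpni_<x>() commands using 'token' here ... */
++
++	return dpni_close(mc_io, 0, token);
++}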
++
++/* DPNI configuration options */
++
++/**
++ * Allow different distribution key profiles for different traffic classes;
++ * if not set, a single key profile is assumed
++ */
++#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001
++
++/**
++ * Disable all non-error transmit confirmation; error frames are reported
++ * back to a common Tx error queue
++ */
++#define DPNI_OPT_TX_CONF_DISABLED 0x00000002
++
++/**
++ * Disable per-sender private Tx confirmation/error queue
++ */
++#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004
++
++/**
++ * Support distribution based on hashed key;
++ * allows statistical distribution over receive queues in a traffic class
++ */
++#define DPNI_OPT_DIST_HASH 0x00000010
++
++/**
++ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are
++ * '0' then backward compatibility is preserved;
++ * Support distribution based on flow steering;
++ * allows explicit control of distribution over receive queues in a traffic
++ * class
++ */
++#define DPNI_OPT_DIST_FS 0x00000020
++
++/**
++ * Unicast filtering support
++ */
++#define DPNI_OPT_UNICAST_FILTER 0x00000080
++/**
++ * Multicast filtering support
++ */
++#define DPNI_OPT_MULTICAST_FILTER 0x00000100
++/**
++ * VLAN filtering support
++ */
++#define DPNI_OPT_VLAN_FILTER 0x00000200
++/**
++ * Support IP reassembly on received packets
++ */
++#define DPNI_OPT_IPR 0x00000800
++/**
++ * Support IP fragmentation on transmitted packets
++ */
++#define DPNI_OPT_IPF 0x00001000
++/**
++ * VLAN manipulation support
++ */
++#define DPNI_OPT_VLAN_MANIPULATION 0x00010000
++/**
++ * Support masking of QoS lookup keys
++ */
++#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000
++/**
++ * Support masking of Flow Steering lookup keys
++ */
++#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000
++
++/**
++ * struct dpni_extended_cfg - Structure representing extended DPNI configuration
++ * @tc_cfg: TCs configuration
++ * @ipr_cfg: IP reassembly configuration
++ */
++struct dpni_extended_cfg {
++ /**
++ * struct tc_cfg - TC configuration
++ * @max_dist: Maximum distribution size for Rx traffic class;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024;
++ * value '0' will be treated as '1'.
++ * Other unsupported values will be rounded down to the nearest
++ * supported value.
++ * @max_fs_entries: Maximum FS entries for Rx traffic class;
++ * '0' means no support for this TC;
++ */
++ struct {
++ uint16_t max_dist;
++ uint16_t max_fs_entries;
++ } tc_cfg[DPNI_MAX_TC];
++ /**
++ * struct ipr_cfg - Structure representing IP reassembly configuration
++ * @max_reass_frm_size: Maximum size of the reassembled frame
++ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments
++ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments
++ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly
++ * process
++ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly
++ * process
++ */
++ struct {
++ uint16_t max_reass_frm_size;
++ uint16_t min_frag_size_ipv4;
++ uint16_t min_frag_size_ipv6;
++ uint16_t max_open_frames_ipv4;
++ uint16_t max_open_frames_ipv6;
++ } ipr_cfg;
++};
++
++/**
++ * dpni_prepare_extended_cfg() - Prepare the extended parameters
++ * @cfg: extended structure
++ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpni_create()
++ */
++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
++ uint8_t *ext_cfg_buf);
++
++/**
++ * struct dpni_cfg - Structure representing DPNI configuration
++ * @mac_addr: Primary MAC address
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpni_cfg {
++ uint8_t mac_addr[6];
++ /**
++ * struct adv - Advanced parameters
++ * @options: Mask of available options; use 'DPNI_OPT_<X>' values
++ * @start_hdr: Selects the packet starting header for parsing;
++ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH'
++ * @max_senders: Maximum number of different senders; used as the number
++ * of dedicated Tx flows; Non-power-of-2 values are rounded
++ * up to the next power-of-2 value as hardware demands it;
++ * '0' will be treated as '1'
++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx);
++ * '0' will be treated as '1'
++ * @max_unicast_filters: Maximum number of unicast filters;
++ * '0' is treated as '16'
++ * @max_multicast_filters: Maximum number of multicast filters;
++ * '0' is treated as '64'
++ * @max_vlan_filters: Maximum number of VLAN filters;
++ * '0' is treated as '16'
++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in
++ * the QoS table; '0' is treated as '64'
++ * @max_qos_key_size: Maximum key size for the QoS look-up;
++ * '0' is treated as '24' which is enough for IPv4
++ * 5-tuple
++ * @max_dist_key_size: Maximum key size for the distribution;
++ * '0' is treated as '24' which is enough for IPv4 5-tuple
++ * @max_policers: Maximum number of policers;
++ * should be between '0' and max_tcs
++ * @max_congestion_ctrl: Maximum number of congestion control groups
++ * (CGs); covers early drop and congestion notification
++ * requirements;
++ * should be between '0' and ('max_tcs' + 'max_senders')
++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory
++ * filled with the extended configuration by calling
++ * dpni_prepare_extended_cfg()
++ */
++ struct {
++ uint32_t options;
++ enum net_prot start_hdr;
++ uint8_t max_senders;
++ uint8_t max_tcs;
++ uint8_t max_unicast_filters;
++ uint8_t max_multicast_filters;
++ uint8_t max_vlan_filters;
++ uint8_t max_qos_entries;
++ uint8_t max_qos_key_size;
++ uint8_t max_dist_key_size;
++ uint8_t max_policers;
++ uint8_t max_congestion_ctrl;
++ uint64_t ext_cfg_iova;
++ } adv;
++};
++
++/**
++ * dpni_create() - Create the DPNI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPNI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpni_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpni_cfg *cfg,
++ uint16_t *token);
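++
++/*
++ * Editorial sketch (not part of the original sources): creating a DPNI with
++ * extended configuration. 'ext_buf' is assumed to be a zeroed 256-byte
++ * buffer and 'ext_iova' its DMA address; the DMA mapping itself is platform
++ * specific and omitted. The dpni_example_create() name is hypothetical.
++ */
++static inline int dpni_example_create(struct fsl_mc_io *mc_io,
++				      const uint8_t mac[6],
++				      uint8_t *ext_buf,
++				      uint64_t ext_iova,
++				      uint16_t *token)
++{
++	struct dpni_extended_cfg ext_cfg = { 0 };
++	struct dpni_cfg cfg = { 0 };
++	int i, err;
++
++	/* one Rx traffic class, distributed over up to 8 queues */
++	ext_cfg.tc_cfg[0].max_dist = 8;
++
++	err = dpni_prepare_extended_cfg(&ext_cfg, ext_buf);
++	if (err)
++		return err;
++
++	for (i = 0; i < 6; i++)
++		cfg.mac_addr[i] = mac[i];
++	cfg.adv.max_tcs = 1;
++	cfg.adv.ext_cfg_iova = ext_iova;
++
++	return dpni_create(mc_io, 0, &cfg, token);
++}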
++
++/**
++ * dpni_destroy() - Destroy the DPNI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpni_pools_cfg - Structure representing buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpni_pools_cfg {
++ uint8_t num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ uint16_t buffer_size;
++ int backup_pool;
++ } pools[DPNI_MAX_DPBP];
++};
++
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * This configuration is mandatory for DPNI operation.
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_pools_cfg *cfg);
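++
++/*
++ * Editorial sketch: attaching a single buffer pool. It assumes the DPBP
++ * object 'dpbp_id' was created and enabled elsewhere and that the DPNI is
++ * still disabled, as dpni_set_pools() requires. The helper name and buffer
++ * size are illustrative.
++ */
++static inline int dpni_example_set_pools(struct fsl_mc_io *mc_io,
++					 uint16_t token, int dpbp_id)
++{
++	struct dpni_pools_cfg pools = { 0 };
++
++	pools.num_dpbp = 1;
++	pools.pools[0].dpbp_id = dpbp_id;
++	pools.pools[0].buffer_size = 2048;	/* bytes per buffer */
++	pools.pools[0].backup_pool = 0;		/* primary pool */
++
++	return dpni_set_pools(mc_io, 0, token, &pools);
++}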
++
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPNI IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPNI_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++
++/**
++ * struct dpni_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpni_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpni_irq_cfg *irq_cfg);
++
++/**
++ * dpni_get_irq() - Get IRQ information from the DPNI.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpni_irq_cfg *irq_cfg);
++
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state: - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable setting
++ * controls the overall interrupt state: if the interrupt is disabled, no
++ * cause can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpni_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpni_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpni_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
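++
++/*
++ * Editorial sketch: arming the link-change interrupt. A real driver would
++ * first bind the IRQ with dpni_set_irq() using an address/value pair from
++ * its MSI setup; only the mask and enable steps are shown here. The helper
++ * name is hypothetical.
++ */
++static inline int dpni_example_arm_link_irq(struct fsl_mc_io *mc_io,
++					    uint16_t token)
++{
++	int err;
++
++	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
++				DPNI_IRQ_EVENT_LINK_CHANGED);
++	if (err)
++		return err;
++
++	return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
++}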
++
++/**
++ * struct dpni_attr - Structure representing DPNI attributes
++ * @id: DPNI object ID
++ * @version: DPNI version
++ * @start_hdr: Indicates the packet starting header for parsing
++ * @options: Mask of available options; reflects the value as was given in
++ * object's creation
++ * @max_senders: Maximum number of different senders; used as the number
++ * of dedicated Tx flows;
++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx)
++ * @max_unicast_filters: Maximum number of unicast filters
++ * @max_multicast_filters: Maximum number of multicast filters
++ * @max_vlan_filters: Maximum number of VLAN filters
++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table
++ * @max_qos_key_size: Maximum key size for the QoS look-up
++ * @max_dist_key_size: Maximum key size for the distribution look-up
++ * @max_policers: Maximum number of policers;
++ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs);
++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory;
++ * call dpni_extract_extended_cfg() to extract the extended configuration
++ */
++struct dpni_attr {
++ int id;
++ /**
++ * struct version - DPNI version
++ * @major: DPNI major version
++ * @minor: DPNI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ enum net_prot start_hdr;
++ uint32_t options;
++ uint8_t max_senders;
++ uint8_t max_tcs;
++ uint8_t max_unicast_filters;
++ uint8_t max_multicast_filters;
++ uint8_t max_vlan_filters;
++ uint8_t max_qos_entries;
++ uint8_t max_qos_key_size;
++ uint8_t max_dist_key_size;
++ uint8_t max_policers;
++ uint8_t max_congestion_ctrl;
++ uint64_t ext_cfg_iova;
++};
++
++/**
++ * dpni_get_attributes() - Retrieve DPNI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_attr *attr);
++
++/**
++ * dpni_extract_extended_cfg() - extract the extended parameters
++ * @cfg: extended structure
++ * @ext_cfg_buf: 256 bytes of DMA-able memory
++ *
++ * This function has to be called after dpni_get_attributes()
++ */
++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
++ const uint8_t *ext_cfg_buf);
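++
++/*
++ * Editorial sketch: querying attributes and extracting the extended
++ * configuration. 'ext_buf' is assumed to be the 256-byte DMA-able buffer
++ * whose I/O virtual address was supplied as 'ext_cfg_iova'; the DMA
++ * mapping/sync steps are omitted. The helper name is hypothetical.
++ */
++static inline int dpni_example_query(struct fsl_mc_io *mc_io, uint16_t token,
++				     const uint8_t *ext_buf,
++				     struct dpni_extended_cfg *ext_cfg)
++{
++	struct dpni_attr attr = { 0 };
++	int err;
++
++	err = dpni_get_attributes(mc_io, 0, token, &attr);
++	if (err)
++		return err;
++
++	return dpni_extract_extended_cfg(ext_cfg, ext_buf);
++}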
++
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ uint32_t errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
++};
++
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
++ *
++ * This function may be called multiple times with different
++ * error masks.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_error_cfg *cfg);
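++
++/*
++ * Editorial sketch: route frames with L3/L4 checksum errors to the error
++ * queue and mark them in the frame annotation status (FAS). The helper name
++ * is hypothetical.
++ */
++static inline int dpni_example_errors(struct fsl_mc_io *mc_io, uint16_t token)
++{
++	struct dpni_error_cfg err_cfg = { 0 };
++
++	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
++	err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
++	err_cfg.set_frame_annotation = 1;
++
++	return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
++}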
++
++/**
++ * DPNI buffer layout modification options
++ */
++
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++ uint32_t options;
++ int pass_timestamp;
++ int pass_parser_result;
++ int pass_frame_status;
++ uint16_t private_data_size;
++ uint16_t data_align;
++ uint16_t data_head_room;
++ uint16_t data_tail_room;
++};
++
++/**
++ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout
++ * attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout
++ * configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
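++
++/*
++ * Editorial sketch: request frame status and 64-byte data alignment on the
++ * Rx path. Only fields flagged in 'options' are applied; the DPNI must be
++ * disabled when this is called. The helper name and the chosen values are
++ * illustrative.
++ */
++static inline int dpni_example_rx_layout(struct fsl_mc_io *mc_io,
++					 uint16_t token)
++{
++	struct dpni_buffer_layout layout = { 0 };
++
++	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
++	layout.pass_frame_status = 1;
++	layout.data_align = 64;	/* bytes */
++
++	return dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
++}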
++
++/**
++ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
++ * for enqueue operations
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qdid: Returned virtual QDID value that should be used as an argument
++ * in all enqueue operations
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *qdid);
++
++/**
++ * struct dpni_sp_info - Structure representing DPNI storage-profile information
++ * (relevant only for DPNI owned by AIOP)
++ * @spids: array of storage-profiles
++ */
++struct dpni_sp_info {
++ uint16_t spids[DPNI_MAX_SP];
++};
++
++/**
++ * dpni_get_sp_info() - Get the AIOP storage profile IDs associated with the DPNI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @sp_info: Returned AIOP storage-profile information
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Only relevant for DPNI that belongs to AIOP container.
++ */
++int dpni_get_sp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_sp_info *sp_info);
++
++/**
++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @data_offset: Tx data offset (from start of buffer)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *data_offset);
++
++/**
++ * enum dpni_counter - DPNI counter types
++ * @DPNI_CNT_ING_FRAME: Counts ingress frames
++ * @DPNI_CNT_ING_BYTE: Counts ingress bytes
++ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit
++ * 'drop' setting
++ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors
++ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPNI_CNT_EGR_FRAME: Counts egress frames
++ * @DPNI_CNT_EGR_BYTE: Counts egress bytes
++ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors
++ */
++enum dpni_counter {
++ DPNI_CNT_ING_FRAME = 0x0,
++ DPNI_CNT_ING_BYTE = 0x1,
++ DPNI_CNT_ING_FRAME_DROP = 0x2,
++ DPNI_CNT_ING_FRAME_DISCARD = 0x3,
++ DPNI_CNT_ING_MCAST_FRAME = 0x4,
++ DPNI_CNT_ING_MCAST_BYTE = 0x5,
++ DPNI_CNT_ING_BCAST_FRAME = 0x6,
++ DPNI_CNT_ING_BCAST_BYTES = 0x7,
++ DPNI_CNT_EGR_FRAME = 0x8,
++ DPNI_CNT_EGR_BYTE = 0x9,
++ DPNI_CNT_EGR_FRAME_DISCARD = 0xa
++};
++
++/**
++ * dpni_get_counter() - Read a specific DPNI counter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @counter: The requested counter
++ * @value: Returned counter's current value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t *value);
++
++/**
++ * dpni_set_counter() - Set (or clear) a specific DPNI counter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @counter: The requested counter
++ * @value: New counter value; typically pass '0' for resetting
++ * the counter.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t value);
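++
++/*
++ * Editorial sketch: clear and later read back the ingress frame counter.
++ * The helper name is hypothetical.
++ */
++static inline int dpni_example_counter(struct fsl_mc_io *mc_io,
++				       uint16_t token, uint64_t *frames)
++{
++	int err;
++
++	/* reset the counter ... */
++	err = dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, 0);
++	if (err)
++		return err;
++
++	/* ... and read its current value */
++	return dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, frames);
++}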
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpni_link_cfg - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ */
++struct dpni_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpni_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_link_cfg *cfg);
++
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpni_get_link_state() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_link_state *state);
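++
++/*
++ * Editorial sketch: enable autonegotiation with pause frames, then read
++ * back the link state. The 1000 Mbps rate is an illustrative value and the
++ * helper name is hypothetical.
++ */
++static inline int dpni_example_link(struct fsl_mc_io *mc_io, uint16_t token,
++				    int *up)
++{
++	struct dpni_link_cfg cfg = { 0 };
++	struct dpni_link_state state = { 0 };
++	int err;
++
++	cfg.rate = 1000;	/* Mbps */
++	cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
++
++	err = dpni_set_link_cfg(mc_io, 0, token, &cfg);
++	if (err)
++		return err;
++
++	err = dpni_get_link_state(mc_io, 0, token, &state);
++	if (err)
++		return err;
++
++	*up = state.up;
++	return 0;
++}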
++
++/**
++ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++ uint32_t rate_limit;
++ uint16_t max_burst_size;
++};
++
++/**
++ * dpni_set_tx_shaping() - Set the transmit shaping
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_shaper: tx shaping configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_shaping_cfg *tx_shaper);
++
++/**
++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length);
++
++/**
++ * dpni_get_max_frame_length() - Get the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *max_frame_length);
++
++/**
++ * dpni_set_mtu() - Set the MTU for the interface.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mtu: MTU length (in bytes)
++ *
++ * MTU determines the maximum fragment size for performing IP
++ * fragmentation on egress packets.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t mtu);
++
++/**
++ * dpni_get_mtu() - Get the MTU.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mtu: Returned MTU length (in bytes)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *mtu);
++
++/**
++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_primary_mac_addr() - Set the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to set as primary address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_get_primary_mac_addr() - Get the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: Returned MAC address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t mac_addr[6]);
++
++/**
++ * dpni_add_mac_addr() - Add MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_remove_mac_addr() - Remove MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @unicast: Set to '1' to clear unicast addresses
++ * @multicast: Set to '1' to clear multicast addresses
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int unicast,
++ int multicast);
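++
++/*
++ * Editorial sketch: drop all current unicast/multicast filter entries (the
++ * primary MAC address is preserved) and admit one extra unicast address.
++ * The helper name is hypothetical.
++ */
++static inline int dpni_example_mac_filter(struct fsl_mc_io *mc_io,
++					  uint16_t token,
++					  const uint8_t addr[6])
++{
++	int err;
++
++	err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
++	if (err)
++		return err;
++
++	return dpni_add_mac_addr(mc_io, 0, token, addr);
++}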
++
++/**
++ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_add_vlan_id() - Add VLAN ID filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @vlan_id: VLAN ID to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * dpni_remove_vlan_id() - Remove VLAN ID filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @vlan_id: VLAN ID to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * dpni_clear_vlan_filters() - Clear all VLAN filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
++ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling
++ */
++enum dpni_tx_schedule_mode {
++ DPNI_TX_SCHED_STRICT_PRIORITY,
++ DPNI_TX_SCHED_WEIGHTED,
++};
++
++/**
++ * struct dpni_tx_schedule_cfg - Structure representing Tx
++ * scheduling configuration
++ * @mode: scheduling mode
++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
++ * not applicable for 'strict-priority' mode;
++ */
++struct dpni_tx_schedule_cfg {
++ enum dpni_tx_schedule_mode mode;
++ uint16_t delta_bandwidth;
++};
++
++/**
++ * struct dpni_tx_selection_cfg - Structure representing transmission
++ * selection configuration
++ * @tc_sched: an array of traffic-classes
++ */
++struct dpni_tx_selection_cfg {
++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
++};
++
++/**
++ * dpni_set_tx_selection() - Set transmission selection configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: transmission selection configuration
++ *
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_selection_cfg *cfg);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++ DPNI_DIST_MODE_NONE = 0,
++ DPNI_DIST_MODE_HASH = 1,
++ DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++ DPNI_FS_MISS_DROP = 0,
++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++ DPNI_FS_MISS_HASH = 2
++};
++
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++ enum dpni_fs_miss_action miss_action;
++ uint16_t default_flow_id;
++};
++
++/**
++ * dpni_prepare_key_cfg() - Prepare the extract parameters
++ * @cfg: defining a full Key Generation profile (rule)
++ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before the following functions:
++ * - dpni_set_rx_tc_dist()
++ * - dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ uint8_t *key_cfg_buf);
++
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ * dpni_prepare_key_cfg() relevant only when
++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ * 'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++ uint16_t dist_size;
++ enum dpni_dist_mode dist_mode;
++ uint64_t key_cfg_iova;
++ struct dpni_fs_tbl_cfg fs_cfg;
++};
++
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg);
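++
++/*
++ * Editorial sketch: hash-distribute traffic class 0 over eight queues.
++ * 'kg_cfg' is assumed to be a key-generation profile already filled in by
++ * the caller (see dpkg.h); 'key_buf' is a zeroed 256-byte buffer and
++ * 'key_iova' its DMA address, with the mapping step omitted. The helper
++ * name is hypothetical.
++ */
++static inline int dpni_example_rx_dist(struct fsl_mc_io *mc_io,
++				       uint16_t token,
++				       const struct dpkg_profile_cfg *kg_cfg,
++				       uint8_t *key_buf, uint64_t key_iova)
++{
++	struct dpni_rx_tc_dist_cfg dist = { 0 };
++	int err;
++
++	err = dpni_prepare_key_cfg(kg_cfg, key_buf);
++	if (err)
++		return err;
++
++	dist.dist_size = 8;	/* one of the supported sizes */
++	dist.dist_mode = DPNI_DIST_MODE_HASH;
++	dist.key_cfg_iova = key_iova;
++
++	return dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist);
++}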
++
++/**
++ * Set to select color aware mode (otherwise - color blind)
++ */
++#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001
++/**
++ * Set to discard frame with RED color
++ */
++#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002
++
++/**
++ * enum dpni_policer_mode - selecting the policer mode
++ * @DPNI_POLICER_MODE_NONE: Policer is disabled
++ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through
++ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698
++ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115
++ */
++enum dpni_policer_mode {
++ DPNI_POLICER_MODE_NONE = 0,
++ DPNI_POLICER_MODE_PASS_THROUGH,
++ DPNI_POLICER_MODE_RFC_2698,
++ DPNI_POLICER_MODE_RFC_4115
++};
++
++/**
++ * enum dpni_policer_unit - DPNI policer units
++ * @DPNI_POLICER_UNIT_BYTES: bytes units
++ * @DPNI_POLICER_UNIT_FRAMES: frames units
++ */
++enum dpni_policer_unit {
++ DPNI_POLICER_UNIT_BYTES = 0,
++ DPNI_POLICER_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_policer_color - selecting the policer color
++ * @DPNI_POLICER_COLOR_GREEN: Green color
++ * @DPNI_POLICER_COLOR_YELLOW: Yellow color
++ * @DPNI_POLICER_COLOR_RED: Red color
++ */
++enum dpni_policer_color {
++ DPNI_POLICER_COLOR_GREEN = 0,
++ DPNI_POLICER_COLOR_YELLOW,
++ DPNI_POLICER_COLOR_RED
++};
++
++/**
++ * struct dpni_rx_tc_policing_cfg - Policer configuration
++ * @options: Mask of available options; use 'DPNI_POLICER_OPT_<X>' values
++ * @mode: policer mode
++ * @default_color: For pass-through mode the policer re-colors any incoming
++ * packet with this color. For color-aware non-pass-through mode the
++ * policer re-colors with this color all packets with FD[DROPP]>2.
++ * @units: Bytes or Packets
++ * @cir: Committed information rate (CIR) in Kbps or packets/second
++ * @cbs: Committed burst size (CBS) in bytes or packets
++ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second
++ * Excess information rate (EIR, rfc4115) in Kbps or packets/second
++ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets
++ * Excess burst size (EBS, rfc4115) in bytes or packets
++ */
++struct dpni_rx_tc_policing_cfg {
++ uint32_t options;
++ enum dpni_policer_mode mode;
++ enum dpni_policer_unit units;
++ enum dpni_policer_color default_color;
++ uint32_t cir;
++ uint32_t cbs;
++ uint32_t eir;
++ uint32_t ebs;
++};
++
++/**
++ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class policing configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_policing_cfg *cfg);
++
++/**
++ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class policing configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_rx_tc_policing_cfg *cfg);
++
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++ DPNI_CONGESTION_UNIT_BYTES = 0,
++ DPNI_CONGESTION_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_early_drop_mode - DPNI early drop mode
++ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpni_early_drop_mode {
++ DPNI_EARLY_DROP_MODE_NONE = 0,
++ DPNI_EARLY_DROP_MODE_TAIL,
++ DPNI_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpni_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold at which packets may be discarded; above
++ * this threshold all packets are discarded; must be less than 2^39;
++ * internally approximated as (x+256)*2^(y-1) due to the HW
++ * implementation.
++ * @min_threshold: minimum threshold at which packets may start to be discarded
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ * associated with the max_threshold).
++ */
++struct dpni_wred_cfg {
++ uint64_t max_threshold;
++ uint64_t min_threshold;
++ uint8_t drop_probability;
++};
++
++/**
++ * struct dpni_early_drop_cfg - early-drop configuration
++ * @mode: drop mode
++ * @units: units type
++ * @green: WRED - 'green' configuration
++ * @yellow: WRED - 'yellow' configuration
++ * @red: WRED - 'red' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpni_early_drop_cfg {
++ enum dpni_early_drop_mode mode;
++ enum dpni_congestion_unit units;
++
++ struct dpni_wred_cfg green;
++ struct dpni_wred_cfg yellow;
++ struct dpni_wred_cfg red;
++
++ uint32_t tail_drop_threshold;
++};
++
++/**
++ * dpni_prepare_early_drop() - prepare an early drop.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpni_set_rx_tc_early_drop or
++ * dpni_set_tx_tc_early_drop
++ *
++ */
++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf);
++
++/**
++ * dpni_extract_early_drop() - extract the early drop configuration.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called after dpni_get_rx_tc_early_drop or
++ * dpni_get_tx_tc_early_drop
++ *
++ */
++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
++ const uint8_t *early_drop_buf);
++
++/**
++ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
++ * with the early-drop configuration by calling dpni_prepare_early_drop()
++ *
++ * warning: Before calling this function, call dpni_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
++ *
++ * warning: After calling this function, call dpni_extract_early_drop() to
++ * get the early drop configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
++ * with the early-drop configuration by calling dpni_prepare_early_drop()
++ *
++ * warning: Before calling this function, call dpni_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
++ *
++ * warning: After calling this function, call dpni_extract_early_drop() to
++ * get the early drop configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
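++/* Illustrative usage sketch, not part of the API: configure WRED early
++ * drop on an Rx traffic class. The enumerator values and the DMA-mapping
++ * helper below are assumptions; in real code the 256-byte buffer must be
++ * zeroed, filled via dpni_prepare_early_drop() and mapped to DMA before
++ * its IOVA is handed to the MC.
++ *
++ *	struct dpni_early_drop_cfg cfg = {
++ *		.mode = DPNI_EARLY_DROP_MODE_WRED,	// assumed enumerator
++ *		.units = DPNI_CONGESTION_UNIT_BYTES,	// assumed enumerator
++ *		.green = { .min_threshold = 32 * 1024,
++ *			   .max_threshold = 64 * 1024,
++ *			   .drop_probability = 50 },
++ *	};
++ *	uint8_t buf[256] = { 0 };
++ *	uint64_t iova;
++ *
++ *	dpni_prepare_early_drop(&cfg, buf);
++ *	iova = map_to_dma(buf, 256);	// hypothetical mapping helper
++ *	err = dpni_set_rx_tc_early_drop(mc_io, 0, token, tc_id, iova);
++ */
++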
++/**
++ * enum dpni_dest - DPNI destination types
++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
++ * does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue by polling or another user-defined
++ * method
++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpni_dest {
++ DPNI_DEST_NONE = 0,
++ DPNI_DEST_DPIO = 1,
++ DPNI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++ enum dpni_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/* DPNI congestion options */
++
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: units type
++ * @threshold_entry: above this threshold we enter a congestion state.
++ * set it to '0' to disable it
++ * @threshold_exit: below this threshold we exit the congestion state.
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
++ * contained in 'options'
++ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
++ * @options: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
++ */
++struct dpni_congestion_notification_cfg {
++ enum dpni_congestion_unit units;
++ uint32_t threshold_entry;
++ uint32_t threshold_exit;
++ uint64_t message_ctx;
++ uint64_t message_iova;
++ struct dpni_dest_cfg dest_cfg;
++ uint16_t options;
++};
++
++/**
++ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
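++/* Illustrative sketch: request a CSCN write to memory on both entry to and
++ * exit from congestion. 'cscn_iova' stands in for a caller-provided IOVA of
++ * 16B-aligned, DMA-able memory; the enumerator choice is an assumption.
++ *
++ *	struct dpni_congestion_notification_cfg cn_cfg = {
++ *		.units = DPNI_CONGESTION_UNIT_FRAMES,	// assumed enumerator
++ *		.threshold_entry = 1000,
++ *		.threshold_exit = 800,
++ *		.message_ctx = 0xdeadbeef,
++ *		.message_iova = cscn_iova,
++ *		.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ *			   DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
++ *	};
++ *
++ *	err = dpni_set_rx_tc_congestion_notification(mc_io, 0, token,
++ *						     tc_id, &cn_cfg);
++ */
++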
++/**
++ * enum dpni_flc_type - DPNI FLC types
++ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for a user-defined value
++ * @DPNI_FLC_STASH: select the FLC to be used for stash control
++ */
++enum dpni_flc_type {
++ DPNI_FLC_USER_DEFINED = 0,
++ DPNI_FLC_STASH = 1,
++};
++
++/**
++ * enum dpni_stash_size - DPNI FLC stashing size
++ * @DPNI_STASH_SIZE_0B: no stash
++ * @DPNI_STASH_SIZE_64B: stashes 64 bytes
++ * @DPNI_STASH_SIZE_128B: stashes 128 bytes
++ * @DPNI_STASH_SIZE_192B: stashes 192 bytes
++ */
++enum dpni_stash_size {
++ DPNI_STASH_SIZE_0B = 0,
++ DPNI_STASH_SIZE_64B = 1,
++ DPNI_STASH_SIZE_128B = 2,
++ DPNI_STASH_SIZE_192B = 3,
++};
++
++/* DPNI FLC stash options */
++
++/**
++ * stashes the whole annotation area (up to 192 bytes)
++ */
++#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
++
++/**
++ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration
++ * @flc_type: FLC type
++ * @options: Mask of available options;
++ * use 'DPNI_FLC_STASH_<X>' values
++ * @frame_data_size: Size of frame data to be stashed
++ * @flow_context_size: Size of flow context to be stashed
++ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED':
++ * this value will be provided in the frame descriptor
++ * (FD[FLC])
++ * 2. In case flc_type is 'DPNI_FLC_STASH':
++ * this value will be I/O virtual address of the
++ * flow-context;
++ * Must be cacheline-aligned and DMA-able memory
++ */
++struct dpni_flc_cfg {
++ enum dpni_flc_type flc_type;
++ uint32_t options;
++ enum dpni_stash_size frame_data_size;
++ enum dpni_stash_size flow_context_size;
++ uint64_t flow_context;
++};
++
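++/* Illustrative sketch: stash 64 bytes of frame data plus the whole
++ * annotation area; with no flow context stashed, 'flow_context' can stay 0
++ * under this assumption.
++ *
++ *	struct dpni_flc_cfg flc = {
++ *		.flc_type = DPNI_FLC_STASH,
++ *		.options = DPNI_FLC_STASH_FRAME_ANNOTATION,
++ *		.frame_data_size = DPNI_STASH_SIZE_64B,
++ *		.flow_context_size = DPNI_STASH_SIZE_0B,
++ *	};
++ */
++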
++/**
++ * DPNI queue modification options
++ */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
++/**
++ * Select to modify the queue's destination
++ */
++#define DPNI_QUEUE_OPT_DEST 0x00000002
++/**
++ * Select to modify the flow-context parameters;
++ * not applicable for Tx-conf/Err queues as the FD comes from the user
++ */
++#define DPNI_QUEUE_OPT_FLC 0x00000004
++/**
++ * Select to modify the queue's order preservation
++ */
++#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008
++/**
++ * Select to modify the queue's tail-drop threshold
++ */
++#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010
++
++/**
++ * struct dpni_queue_cfg - Structure representing queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPNI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX'
++ * is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options'
++ * @flc_cfg: Flow context configuration; in case the TC's distribution
++ * is either NONE or HASH, the FLC settings of flow #0 are used;
++ * in the case of FS (flow steering), the flow's own FLC settings
++ * are used;
++ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options'
++ * @order_preservation_en: enable/disable order preservation;
++ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained
++ * in 'options'
++ * @tail_drop_threshold: set the queue's tail drop threshold in bytes;
++ * a value of '0' disables the threshold; maximum value is 0xE000000;
++ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained
++ * in 'options'
++ */
++struct dpni_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpni_dest_cfg dest_cfg;
++ struct dpni_flc_cfg flc_cfg;
++ int order_preservation_en;
++ uint32_t tail_drop_threshold;
++};
++
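++/* Illustrative sketch: only the fields selected in 'options' are applied,
++ * so a partial update just ORs the relevant flags. Passing this cfg to
++ * dpni_set_rx_flow() (declared further below) would, under these
++ * assumptions, retarget the flow to a DPCON channel and cap its depth;
++ * 'dpcon_id' is a caller-provided object ID.
++ *
++ *	struct dpni_queue_cfg qcfg = {
++ *		.options = DPNI_QUEUE_OPT_DEST |
++ *			   DPNI_QUEUE_OPT_TAILDROP_THRESHOLD,
++ *		.dest_cfg = { .dest_type = DPNI_DEST_DPCON,
++ *			      .dest_id = dpcon_id,
++ *			      .priority = 0 },
++ *		.tail_drop_threshold = 64 * 1024,	// bytes
++ *	};
++ */
++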
++/**
++ * struct dpni_queue_attr - Structure representing queue attributes
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @flc_cfg: Flow context configuration
++ * @order_preservation_en: enable/disable order preservation
++ * @tail_drop_threshold: queue's tail drop threshold in bytes
++ * @fqid: Virtual fqid value to be used for dequeue operations
++ */
++struct dpni_queue_attr {
++ uint64_t user_ctx;
++ struct dpni_dest_cfg dest_cfg;
++ struct dpni_flc_cfg flc_cfg;
++ int order_preservation_en;
++ uint32_t tail_drop_threshold;
++
++ uint32_t fqid;
++};
++
++/**
++ * DPNI Tx flow modification options
++ */
++
++/**
++ * Select to modify the settings for dedicated Tx confirmation/error
++ */
++#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001
++/**
++ * Select to modify the L3 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010
++/**
++ * Select to modify the L4 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020
++
++/**
++ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration
++ * @options: Flags representing the suggested modifications to the Tx flow;
++ * Use any combination 'DPNI_TX_FLOW_OPT_<X>' flags
++ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx
++ * confirmation and error queue; Set to '0' to use the private
++ * Tx confirmation and error queue; valid only if
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation
++ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options'
++ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable;
++ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options'
++ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable;
++ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options'
++ */
++struct dpni_tx_flow_cfg {
++ uint32_t options;
++ int use_common_tx_conf_queue;
++ int l3_chksum_gen;
++ int l4_chksum_gen;
++};
++
++/**
++ * dpni_set_tx_flow() - Set Tx flow configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: Provides (or returns) the sender's flow ID;
++ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate
++ * a new flow_id; this ID should be used as the QDBIN argument
++ * in enqueue operations
++ * @cfg: Tx flow configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *flow_id,
++ const struct dpni_tx_flow_cfg *cfg);
++
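++/* Illustrative sketch: a new sender seeds *flow_id with DPNI_NEW_FLOW_ID
++ * (defined earlier in this header) and receives a fresh flow ID, later
++ * used as the QDBIN argument in enqueue operations.
++ *
++ *	struct dpni_tx_flow_cfg tx_cfg = {
++ *		.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
++ *			   DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN,
++ *		.l3_chksum_gen = 1,
++ *		.l4_chksum_gen = 1,
++ *	};
++ *	uint16_t flow_id = DPNI_NEW_FLOW_ID;
++ *
++ *	err = dpni_set_tx_flow(mc_io, 0, token, &flow_id, &tx_cfg);
++ */
++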
++/**
++ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes
++ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and
++ * error queue; '0' if using private Tx confirmation and error queue
++ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled
++ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled
++ */
++struct dpni_tx_flow_attr {
++ int use_common_tx_conf_queue;
++ int l3_chksum_gen;
++ int l4_chksum_gen;
++};
++
++/**
++ * dpni_get_tx_flow() - Get Tx flow attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function
++ * @attr: Returned Tx flow attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_flow_attr *attr);
++
++/**
++ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration
++ * @errors_only: Set to '1' to report back only error frames;
++ * Set to '0' to confirm transmission/error for all transmitted frames
++ * @queue_cfg: Queue configuration
++ */
++struct dpni_tx_conf_cfg {
++ int errors_only;
++ struct dpni_queue_cfg queue_cfg;
++};
++
++/**
++ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: Queue configuration
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue.
++ * If 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported
++ * back - successfully transmitted frames are not confirmed. Otherwise, all
++ * transmitted frames are sent for confirmation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_tx_conf_cfg *cfg);
++
++/**
++ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes
++ * @errors_only: '1' if only error frames are reported back; '0' if all
++ * transmitted frames are confirmed
++ * @queue_attr: Queue attributes
++ */
++struct dpni_tx_conf_attr {
++ int errors_only;
++ struct dpni_queue_attr queue_attr;
++};
++
++/**
++ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @attr: Returned tx-conf attributes
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_conf_attr *attr);
++
++/**
++ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: congestion notification configuration
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: congestion notification
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue.
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_set_tx_conf_revoke() - Tx confirmation revocation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @revoke: Set to '1' to revoke Tx confirmation; '0' to restore it
++ *
++ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
++ * selected at DPNI creation.
++ * Calling this function with 'revoke' set to '1' disables all transmit
++ * confirmation (including the private confirmation queues), regardless of
++ * previous settings; Note that in this case, Tx error frames are still
++ * enqueued to the general transmit errors queue.
++ * Calling this function with 'revoke' set to '0' restores the previous
++ * settings for both general and private transmit confirmation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int revoke);
++
++/**
++ * dpni_set_rx_flow() - Set Rx flow configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7);
++ * use 'DPNI_ALL_TCS' to set all TCs and all flows
++ * @flow_id: Rx flow id within the traffic class; use
++ * 'DPNI_ALL_TC_FLOWS' to set all flows within
++ * this tc_id; ignored if tc_id is set to
++ * 'DPNI_ALL_TCS';
++ * @cfg: Rx flow configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ const struct dpni_queue_cfg *cfg);
++
++/**
++ * dpni_get_rx_flow() - Get Rx flow attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @flow_id: Rx flow id within the traffic class
++ * @attr: Returned Rx flow attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ struct dpni_queue_attr *attr);
++
++/**
++ * dpni_set_rx_err_queue() - Set Rx error queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_queue_cfg *cfg);
++
++/**
++ * dpni_get_rx_err_queue() - Get Rx error queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Returned Queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_queue_attr *attr);
++
++/**
++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * key extractions to be used as the QoS criteria by calling
++ * dpni_prepare_key_cfg()
++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
++ * '0' to use the 'default_tc' in such cases
++ * @default_tc: Used in case of no-match, if 'discard_on_miss' = 0
++ */
++struct dpni_qos_tbl_cfg {
++ uint64_t key_cfg_iova;
++ int discard_on_miss;
++ uint8_t default_tc;
++};
++
++/**
++ * dpni_set_qos_table() - Set QoS mapping table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS table configuration
++ *
++ * This function and all QoS-related functions require that
++ * 'max_tcs > 1' was set at DPNI creation.
++ *
++ * warning: Before calling this function, call dpni_prepare_key_cfg() to
++ * prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_qos_tbl_cfg *cfg);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++ uint64_t key_iova;
++ uint64_t mask_iova;
++ uint8_t key_size;
++};
++
++/**
++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to add
++ * @tc_id: Traffic class selection (0-7)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg,
++ uint8_t tc_id);
++
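++/* Illustrative sketch: steer matching frames to traffic class 2.
++ * 'key_iova' and 'mask_iova' stand in for caller-prepared DMA-able
++ * buffers laid out to match the key composition installed through
++ * dpni_set_qos_table().
++ *
++ *	struct dpni_rule_cfg rule = {
++ *		.key_iova = key_iova,
++ *		.mask_iova = mask_iova,
++ *		.key_size = 4,	// bytes; must match the key extraction
++ *	};
++ *
++ *	err = dpni_add_qos_entry(mc_io, 0, token, &rule, 2);
++ */
++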
++/**
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg);
++
++/**
++ * dpni_clear_qos_table() - Clear all QoS mapping entries
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Following this function call, all frames are directed to
++ * the default traffic class (0).
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to add
++ * @flow_id: Flow id selection (must be smaller than the
++ * distribution size of the traffic class)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg,
++ uint16_t flow_id);
++
++/**
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg);
++
++/**
++ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id);
++
++/**
++ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
++ * at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
++ * at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_ipr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI
++ * creation. Fragmentation is performed according to the MTU value
++ * set by the dpni_set_mtu() function.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_ipf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++#endif /* __FSL_DPNI_H */
+--- a/drivers/staging/fsl-mc/include/mc-cmd.h
++++ b/drivers/staging/fsl-mc/include/mc-cmd.h
+@@ -103,8 +103,11 @@ enum mc_cmd_status {
+ #define MC_CMD_HDR_READ_FLAGS(_hdr) \
+ ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S))
+
++#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \
++ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg)))
++
+ #define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
+- ((_ext)[_param] |= mc_enc((_offset), (_width), _arg))
++ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width)))
+
+ #define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/net.h
+@@ -0,0 +1,481 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_NET_H
++#define __FSL_NET_H
++
++#define LAST_HDR_INDEX 0xFFFFFFFF
++
++/*****************************************************************************/
++/* Protocol fields */
++/*****************************************************************************/
++
++/************************* Ethernet fields *********************************/
++#define NH_FLD_ETH_DA (1)
++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++
++#define NH_FLD_ETH_ADDR_SIZE 6
++
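++/* Note on the pattern used throughout this file: each header's first field
++ * occupies bit 0 and later fields shift it left, so every ALL_FIELDS mask
++ * reduces to (first_field << n) - 1. For Ethernet, for instance,
++ * NH_FLD_ETH_ALL_FIELDS == (1 << 6) - 1 == 0x3F, covering all six fields.
++ */
++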
++/*************************** VLAN fields ***********************************/
++#define NH_FLD_VLAN_VPRI (1)
++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
++
++/************************ IP (generic) fields ******************************/
++#define NH_FLD_IP_VER (1)
++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++
++#define NH_FLD_IP_PROTO_SIZE 1
++
++/***************************** IPV4 fields *********************************/
++#define NH_FLD_IPV4_VER (1)
++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++
++#define NH_FLD_IPV4_ADDR_SIZE 4
++#define NH_FLD_IPV4_PROTO_SIZE 1
++
++/***************************** IPV6 fields *********************************/
++#define NH_FLD_IPV6_VER (1)
++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++
++#define NH_FLD_IPV6_ADDR_SIZE 16
++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++
++/***************************** ICMP fields *********************************/
++#define NH_FLD_ICMP_TYPE (1)
++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++
++#define NH_FLD_ICMP_CODE_SIZE 1
++#define NH_FLD_ICMP_TYPE_SIZE 1
++
++/***************************** IGMP fields *********************************/
++#define NH_FLD_IGMP_VERSION (1)
++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++
++/***************************** TCP fields **********************************/
++#define NH_FLD_TCP_PORT_SRC (1)
++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++
++#define NH_FLD_TCP_PORT_SIZE 2
++
++/***************************** UDP fields **********************************/
++#define NH_FLD_UDP_PORT_SRC (1)
++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_UDP_PORT_SIZE 2
++
++/*************************** UDP-lite fields *******************************/
++#define NH_FLD_UDP_LITE_PORT_SRC (1)
++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS \
++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define NH_FLD_UDP_LITE_PORT_SIZE 2
++
++/*************************** UDP-encap-ESP fields **************************/
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++
++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_PORT_SRC (1)
++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_SCTP_PORT_SIZE 2
++
++/***************************** DCCP fields *********************************/
++#define NH_FLD_DCCP_PORT_SRC (1)
++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++
++#define NH_FLD_DCCP_PORT_SIZE 2
++
++/***************************** IPHC fields *********************************/
++#define NH_FLD_IPHC_CID (1)
++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++/*************************** L2TPV2 fields *********************************/
++#define NH_FLD_L2TPV2_TYPE_BIT (1)
++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
++#define NH_FLD_L2TPV2_ALL_FIELDS \
++ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++
++/*************************** L2TPV3 fields *********************************/
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++
++/**************************** PPP fields ***********************************/
++#define NH_FLD_PPP_PID (1)
++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
++
++/************************** PPPoE fields ***********************************/
++#define NH_FLD_PPPOE_VER (1)
++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
++
++/************************* PPP-Mux fields **********************************/
++#define NH_FLD_PPPMUX_PID (1)
++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
++
++/*********************** PPP-Mux sub-frame fields **************************/
++#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
++
++/*************************** LLC fields ************************************/
++#define NH_FLD_LLC_DSAP (1)
++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
++
++/*************************** NLPID fields **********************************/
++#define NH_FLD_NLPID_NLPID (1)
++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++
++/*************************** SNAP fields ***********************************/
++#define NH_FLD_SNAP_OUI (1)
++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++
++/*************************** LLC SNAP fields *******************************/
++#define NH_FLD_LLC_SNAP_TYPE (1)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++
++#define NH_FLD_ARP_HTYPE (1)
++#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++
++/*************************** RFC2684 fields ********************************/
++#define NH_FLD_RFC2684_LLC (1)
++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++
++/*************************** User defined fields ***************************/
++#define NH_FLD_USER_DEFINED_SRCPORT (1)
++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS \
++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++
++/*************************** Payload fields ********************************/
++#define NH_FLD_PAYLOAD_BUFFER (1)
++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++
++/*************************** GRE fields ************************************/
++#define NH_FLD_GRE_TYPE (1)
++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
++
++/*************************** MINENCAP fields *******************************/
++#define NH_FLD_MINENCAP_SRC_IP (1)
++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
++#define NH_FLD_MINENCAP_ALL_FIELDS \
++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
++
++/*************************** IPSEC AH fields *******************************/
++#define NH_FLD_IPSEC_AH_SPI (1)
++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
++
++/*************************** IPSEC ESP fields ******************************/
++#define NH_FLD_IPSEC_ESP_SPI (1)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
++
++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
++
++/*************************** MPLS fields ***********************************/
++#define NH_FLD_MPLS_LABEL_STACK (1)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
++
++/*************************** MACSEC fields *********************************/
++#define NH_FLD_MACSEC_SECTAG (1)
++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
++
++/*************************** GTP fields ************************************/
++#define NH_FLD_GTP_TEID (1)
++
++
++/* Protocol options */
++
++/* Ethernet options */
++#define NH_OPT_ETH_BROADCAST 1
++#define NH_OPT_ETH_MULTICAST 2
++#define NH_OPT_ETH_UNICAST 3
++#define NH_OPT_ETH_BPDU 4
++
++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
++/* also applicable for broadcast */
++
++/* VLAN options */
++#define NH_OPT_VLAN_CFI 1
++
++/* IPV4 options */
++#define NH_OPT_IPV4_UNICAST 1
++#define NH_OPT_IPV4_MULTICAST 2
++#define NH_OPT_IPV4_BROADCAST 3
++#define NH_OPT_IPV4_OPTION 4
++#define NH_OPT_IPV4_FRAG 5
++#define NH_OPT_IPV4_INITIAL_FRAG 6
++
++/* IPV6 options */
++#define NH_OPT_IPV6_UNICAST 1
++#define NH_OPT_IPV6_MULTICAST 2
++#define NH_OPT_IPV6_OPTION 3
++#define NH_OPT_IPV6_FRAG 4
++#define NH_OPT_IPV6_INITIAL_FRAG 5
++
++/* General IP options (may be used for any version) */
++#define NH_OPT_IP_FRAG 1
++#define NH_OPT_IP_INITIAL_FRAG 2
++#define NH_OPT_IP_OPTION 3
++
++/* Minenc. options */
++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
++
++/* GRE options */
++#define NH_OPT_GRE_ROUTING_PRESENT 1
++
++/* TCP options */
++#define NH_OPT_TCP_OPTIONS 1
++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
++#define NH_OPT_TCP_CONTROL_LOW_BITS 3
++
++/* CAPWAP options */
++#define NH_OPT_CAPWAP_DTLS 1
++
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
++
++ NET_PROT_DUMMY_LAST
++};
++
++/*! IEEE 802.1Q */
++#define NH_IEEE8021Q_ETYPE 0x8100
++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
++ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
++ (((uint32_t)(pcp & 0x07)) << 13) | \
++ (((uint32_t)(dei & 0x01)) << 12) | \
++ (((uint32_t)(vlan_id & 0xFFF))))
++
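++/* Worked example: NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100) packs
++ * TPID 0x8100 into the upper 16 bits, PCP 5 into bits 15-13, DEI 0 into
++ * bit 12 and VLAN ID 100 into bits 11-0, yielding 0x8100A064.
++ */
++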
++#endif /* __FSL_NET_H */
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2790,6 +2790,8 @@ static struct sk_buff *pktgen_alloc_skb(
+ } else {
+ skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
+ }
++	if (skb)
++		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+ /* the caller pre-fetches from skb->data and reserves for the mac hdr */
+ if (likely(skb))
diff --git a/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch b/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch
new file mode 100644
index 0000000..d331b66
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7202-staging-fsl-dpaa2-eth-code-cleanup-for-upstreaming.patch
@@ -0,0 +1,3257 @@
+From 2a6f0dd5425cf43b8c09a8203e6ee64ba2b3868d Mon Sep 17 00:00:00 2001
+From: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+Date: Tue, 12 Jan 2016 08:58:40 +0200
+Subject: [PATCH 202/226] staging: fsl-dpaa2: eth: code cleanup for
+ upstreaming
+
+-this is a squash of cleanup commits (see QLINUX-5338); all commit logs
+ are below
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+---------------------------------------------------------------------
+
+fsl-dpaa2: eth: Drain queues upon ifconfig down
+
+MC firmware assists in draining the Tx FQs; the Eth driver flushes the
+Rx and TxConfirm queues then empties the buffer pool.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: eth: Don't use magic numbers
+
+Add a define to avoid mentioning directly the maximum number
+of buffers released/acquired through a single QBMan command.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+dpaa2-eth: Remove cpumask_rr macro
+
+It's only used in one place and not very intuitive
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Rename a variable
+
+The old name was a leftover and non-intuitive.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Rearrange code
+
+Rearrange the conditional statements in several functions
+to avoid excessive indenting, with no change in functionality.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Remove incorrect check
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Fix bug on error path
+
+We were not doing a DMA unmap on the error path of dpaa2_dpni_setup.
+Reorganize the code a bit to avoid this.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Error messages cleanup
+
+This commit cleans up and improves uniformity of messages on
+error paths throughout the Ethernet driver:
+
+* don't use WARN/WARN_ON/WARN_ONCE for warning messages, as
+we don't need a stack dump
+* give up using the DPAA2_ETH_WARN_IF_ERR custom macro
+* ensure dev_err and netdev_err are each used where needed and
+not randomly
+* remove error messages on memory allocation failures; the kernel
+is quite capable of dumping a detailed message when that happens
+* remove error messages on the fast path; we don't want to flood
+the console and we already increment counters in most error cases
+* ratelimit error messages where appropriate
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Fix name of ethtool counters
+
+Rename counters in ethtool -S from "portal busy" to "dequeue portal
+busy" and from "tx portal busy" to "enqueue portal busy", so it's
+less confusing for the user.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Retry DAN rearm if portal busy
+
+There's a chance the data available notification rearming will
+fail if the QBMan portal is busy. Keep retrying until portal
+becomes available again, like we do for buffer release and
+pull dequeue operations.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Add cpu_relax() to portal busy loops
+
+For several DPIO operations, we may need to repeatedly try
+until the QBMan portal is no longer busy. Add a cpu_relax() to
+those loops, like we were already doing when seeding buffers.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Add a counter for channel pull errors
+
+We no longer print an error message in this case, so add an error
+counter so we can at least know something went wrong.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Function renames
+
+Attempt to provide more uniformity for the DPAA2 Ethernet
+driver function naming conventions:
+* major functions (ndo_ops, driver ops, ethtool, etc) all have
+the "dpaa2_eth" prefix
+* non-static functions also start with "dpaa2_eth"
+* static helper functions don't get any prefix in order to avoid
+very long names
+* some functions get more intuitive and/or explicit names
+* don't use names starting with an underscore
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Structure and macro renames
+
+Some more renaming:
+* defines of error/status bits in the frame annotation status
+word get a "DPAA2_FAS" prefix instead of "DPAA2_ETH_FAS", as they're
+not really specific to the ethernet driver. We may consider moving
+these defines to a separate header file in the future
+* DPAA2_ETH_RX_BUFFER_SIZE is renamed to DPAA2_ETH_RX_BUF_SIZE
+to better match the naming style of other defines
+* structure "dpaa2_eth_stats" becomes "dpaa2_eth_drv_stats" to
+make it clear these are driver specific statistics
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Cosmetics
+
+Various coding style fixes and other minor cosmetics,
+with no functional impact. Also remove a couple of unused
+defines and a structure field.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Move function call
+
+Move call to set_fq_affinity() from probe to setup_fqs(), as it
+logically belongs there.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: eth: Comments cleanup
+
+Add relevant comments where needed, remove obsolete or
+useless ones.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Remove link poll Kconfig option
+
+Always try to use interrupts, but if they are not available
+fall back to polling the link state.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: Remove message level
+
+We were defining netif message level, but we weren't using
+it when printing error/info messages, so remove for now.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: eth: fix compile error on 4.5 uprev
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethernet/Kconfig | 6 -
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 6 +-
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 992 ++++++++++----------
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 133 +--
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 226 ++---
+ 5 files changed, 693 insertions(+), 670 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/Kconfig
++++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig
+@@ -16,12 +16,6 @@ menuconfig FSL_DPAA2_ETH
+ driver, using the Freescale MC bus driver.
+
+ if FSL_DPAA2_ETH
+-config FSL_DPAA2_ETH_LINK_POLL
+- bool "Use polling mode for link state"
+- default n
+- ---help---
+- Poll for detecting link state changes instead of using
+- interrupts.
+
+ config FSL_DPAA2_ETH_USE_ERR_QUEUE
+ bool "Enable Rx error queue"
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+@@ -30,7 +30,6 @@
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+-
+ #include <linux/module.h>
+ #include <linux/debugfs.h>
+ #include "dpaa2-eth.h"
+@@ -38,14 +37,13 @@
+
+ #define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+-
+ static struct dentry *dpaa2_dbg_root;
+
+ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+ {
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct rtnl_link_stats64 *stats;
+- struct dpaa2_eth_stats *extras;
++ struct dpaa2_eth_drv_stats *extras;
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+@@ -200,7 +198,7 @@ static ssize_t dpaa2_dbg_reset_write(str
+ {
+ struct dpaa2_eth_priv *priv = file->private_data;
+ struct rtnl_link_stats64 *percpu_stats;
+- struct dpaa2_eth_stats *percpu_extras;
++ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_eth_channel *ch;
+ int i;
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -53,26 +53,14 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Freescale Semiconductor, Inc");
+ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+-static int debug = -1;
+-module_param(debug, int, S_IRUGO);
+-MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+-
+ /* Oldest DPAA2 objects version we are compatible with */
+ #define DPAA2_SUPPORTED_DPNI_VERSION 6
+ #define DPAA2_SUPPORTED_DPBP_VERSION 2
+ #define DPAA2_SUPPORTED_DPCON_VERSION 2
+
+-/* Iterate through the cpumask in a round-robin fashion. */
+-#define cpumask_rr(cpu, maskptr) \
+-do { \
+- (cpu) = cpumask_next((cpu), (maskptr)); \
+- if ((cpu) >= nr_cpu_ids) \
+- (cpu) = cpumask_first((maskptr)); \
+-} while (0)
+-
+-static void dpaa2_eth_rx_csum(struct dpaa2_eth_priv *priv,
+- u32 fd_status,
+- struct sk_buff *skb)
++static void validate_rx_csum(struct dpaa2_eth_priv *priv,
++ u32 fd_status,
++ struct sk_buff *skb)
+ {
+ skb_checksum_none_assert(skb);
+
+@@ -81,8 +69,8 @@ static void dpaa2_eth_rx_csum(struct dpa
+ return;
+
+ /* Read checksum validation bits */
+- if (!((fd_status & DPAA2_ETH_FAS_L3CV) &&
+- (fd_status & DPAA2_ETH_FAS_L4CV)))
++ if (!((fd_status & DPAA2_FAS_L3CV) &&
++ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+@@ -92,53 +80,55 @@ static void dpaa2_eth_rx_csum(struct dpa
+ /* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+-static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+- const struct dpaa2_fd *fd,
+- void *vaddr)
++static void free_rx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ void *vaddr)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
++ struct dpaa2_sg_entry *sgt;
++ void *sg_vaddr;
++ int i;
+
+- if (fd_format == dpaa2_fd_sg) {
+- struct dpaa2_sg_entry *sgt = vaddr + dpaa2_fd_get_offset(fd);
+- void *sg_vaddr;
+- int i;
++ /* If single buffer frame, just free the data buffer */
++ if (fd_format == dpaa2_fd_single)
++ goto free_buf;
+
+- for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+- dpaa2_sg_le_to_cpu(&sgt[i]);
++ /* For S/G frames, we first need to free all SG entries */
++ sgt = vaddr + dpaa2_fd_get_offset(fd);
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ dpaa2_sg_le_to_cpu(&sgt[i]);
+
+- addr = dpaa2_sg_get_addr(&sgt[i]);
+- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE,
+- DMA_FROM_DEVICE);
++ addr = dpaa2_sg_get_addr(&sgt[i]);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
+
+- sg_vaddr = phys_to_virt(addr);
+- put_page(virt_to_head_page(sg_vaddr));
++ sg_vaddr = phys_to_virt(addr);
++ put_page(virt_to_head_page(sg_vaddr));
+
+- if (dpaa2_sg_is_final(&sgt[i]))
+- break;
+- }
++ if (dpaa2_sg_is_final(&sgt[i]))
++ break;
+ }
+
++free_buf:
+ put_page(virt_to_head_page(vaddr));
+ }
+
+ /* Build a linear skb based on a single-buffer frame descriptor */
+-static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_channel *ch,
+- const struct dpaa2_fd *fd,
+- void *fd_vaddr)
++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ void *fd_vaddr)
+ {
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUFFER_SIZE +
++ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+- if (unlikely(!skb)) {
+- netdev_err(priv->net_dev, "build_skb() failed\n");
++ if (unlikely(!skb))
+ return NULL;
+- }
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+@@ -149,9 +139,9 @@ static struct sk_buff *dpaa2_eth_build_l
+ }
+
+ /* Build a non linear (fragmented) skb based on a S/G table */
+-static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_channel *ch,
+- struct dpaa2_sg_entry *sgt)
++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ struct dpaa2_sg_entry *sgt)
+ {
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+@@ -168,66 +158,57 @@ static struct sk_buff *dpaa2_eth_build_f
+
+ dpaa2_sg_le_to_cpu(sge);
+
+- /* We don't support anything else yet! */
+- if (unlikely(dpaa2_sg_get_format(sge) != dpaa2_sg_single)) {
+- dev_warn_once(dev, "Unsupported S/G entry format: %d\n",
+- dpaa2_sg_get_format(sge));
+- return NULL;
+- }
++ /* NOTE: We only support SG entries in dpaa2_sg_single format,
++ * but this is the only format we may receive from HW anyway
++ */
+
+- /* Get the address, offset and length from the S/G entry */
++ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUFFER_SIZE,
++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(dev, sg_addr))) {
+- netdev_err(priv->net_dev, "DMA unmap failed\n");
+- return NULL;
+- }
++
+ sg_vaddr = phys_to_virt(sg_addr);
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUFFER_SIZE +
++ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+- if (unlikely(!skb)) {
+- netdev_err(priv->net_dev, "build_skb failed\n");
++ if (unlikely(!skb))
+ return NULL;
+- }
++
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+- /* Subsequent data in SGEntries are stored at
+- * offset 0 in their buffers, we don't need to
+- * compute sg_offset.
+- */
+- WARN_ONCE(dpaa2_sg_get_offset(sge) != 0,
+- "Non-zero offset in SGE[%d]!\n", i);
+-
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+- /* Offset in page (which may be compound) */
++ /* Offset in page (which may be compound).
++ * Data in subsequent SG entries is stored from the
++ * beginning of the buffer, so we don't need to add the
++ * sg_offset.
++ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+- sg_length, DPAA2_ETH_RX_BUFFER_SIZE);
++ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+- /* Count all data buffers + sgt buffer */
++ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+ }
+
++/* Main Rx frame processing routine */
+ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+@@ -238,7 +219,7 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+- struct dpaa2_eth_stats *percpu_extras;
++ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+@@ -246,7 +227,7 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(addr);
+
+ prefetch(vaddr + priv->buf_layout.private_data_size);
+@@ -256,32 +237,30 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (fd_format == dpaa2_fd_single) {
+- skb = dpaa2_eth_build_linear_skb(priv, ch, fd, vaddr);
++ skb = build_linear_skb(priv, ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ struct dpaa2_sg_entry *sgt =
+ vaddr + dpaa2_fd_get_offset(fd);
+- skb = dpaa2_eth_build_frag_skb(priv, ch, sgt);
++ skb = build_frag_skb(priv, ch, sgt);
+ put_page(virt_to_head_page(vaddr));
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+ } else {
+ /* We don't support any other format */
+- netdev_err(priv->net_dev, "Received invalid frame format\n");
+ goto err_frame_format;
+ }
+
+- if (unlikely(!skb)) {
+- dev_err_once(dev, "error building skb\n");
++ if (unlikely(!skb))
+ goto err_build_skb;
+- }
+
+ prefetch(skb->data);
+
++ /* Get the timestamp value */
+ if (priv->ts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+- u64 *ns = (u64 *) (vaddr +
+- priv->buf_layout.private_data_size +
+- sizeof(struct dpaa2_fas));
++ u64 *ns = (u64 *)(vaddr +
++ priv->buf_layout.private_data_size +
++ sizeof(struct dpaa2_fas));
+
+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+@@ -293,7 +272,7 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ fas = (struct dpaa2_fas *)
+ (vaddr + priv->buf_layout.private_data_size);
+ status = le32_to_cpu(fas->status);
+- dpaa2_eth_rx_csum(priv, status, skb);
++ validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+@@ -309,11 +288,14 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ return;
+ err_frame_format:
+ err_build_skb:
+- dpaa2_eth_free_rx_fd(priv, fd, vaddr);
++ free_rx_fd(priv, fd, vaddr);
+ percpu_stats->rx_dropped++;
+ }
+
+ #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++/* Processing of Rx frames received on the error FQ
++ * We check and print the error bits and then free the frame
++ */
+ static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+@@ -326,21 +308,18 @@ static void dpaa2_eth_rx_err(struct dpaa
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(addr);
+
+ if (fd->simple.frc & DPAA2_FD_FRC_FASV) {
+ fas = (struct dpaa2_fas *)
+ (vaddr + priv->buf_layout.private_data_size);
+ status = le32_to_cpu(fas->status);
+-
+- /* All frames received on this queue should have at least
+- * one of the Rx error bits set */
+- WARN_ON_ONCE((status & DPAA2_ETH_RX_ERR_MASK) == 0);
+- netdev_dbg(priv->net_dev, "Rx frame error: 0x%08x\n",
+- status & DPAA2_ETH_RX_ERR_MASK);
++ if (net_ratelimit())
++ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n",
++ status & DPAA2_ETH_RX_ERR_MASK);
+ }
+- dpaa2_eth_free_rx_fd(priv, fd, vaddr);
++ free_rx_fd(priv, fd, vaddr);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
+@@ -353,7 +332,7 @@ static void dpaa2_eth_rx_err(struct dpaa
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+-static int dpaa2_eth_store_consume(struct dpaa2_eth_channel *ch)
++static int consume_frames(struct dpaa2_eth_channel *ch)
+ {
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq;
+@@ -365,20 +344,14 @@ static int dpaa2_eth_store_consume(struc
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+- if (unlikely(!is_last)) {
+- netdev_dbg(priv->net_dev,
+- "Channel %d reqturned no valid frames\n",
+- ch->ch_id);
+- /* MUST retry until we get some sort of
+- * valid response token (be it "empty dequeue"
+- * or a valid frame).
+- */
+- continue;
+- }
+- break;
++ /* If we're here, we *must* have placed a
++ * volatile dequeue command, so keep reading through
++ * the store until we get some sort of valid response
++ * token (either a valid frame or an "empty dequeue")
++ */
++ continue;
+ }
+
+- /* Obtain FD and process it */
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
+ fq->stats.frames++;
+@@ -390,9 +363,10 @@ static int dpaa2_eth_store_consume(struc
+ return cleaned;
+ }
+
+-static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+- struct sk_buff *skb,
+- struct dpaa2_fd *fd)
++/* Create a frame descriptor based on a fragmented skb */
++static int build_sg_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+@@ -404,14 +378,16 @@ static int dpaa2_eth_build_sg_fd(struct
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+- struct dpaa2_eth_swa *bps;
++ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+- WARN_ON(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1);
++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
++ return -EINVAL;
++
+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+@@ -420,7 +396,6 @@ static int dpaa2_eth_build_sg_fd(struct
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+ if (unlikely(!num_dma_bufs)) {
+- netdev_err(priv->net_dev, "dma_map_sg() error\n");
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+@@ -430,7 +405,6 @@ static int dpaa2_eth_build_sg_fd(struct
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+ if (unlikely(!sgt_buf)) {
+- netdev_err(priv->net_dev, "failed to allocate SGT buffer\n");
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+@@ -462,19 +436,19 @@ static int dpaa2_eth_build_sg_fd(struct
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the SWA. We'll need all of them on Tx Conf.
+ */
+- bps = (struct dpaa2_eth_swa *)sgt_buf;
+- bps->skb = skb;
+- bps->scl = scl;
+- bps->num_sg = num_sg;
+- bps->num_dma_bufs = num_dma_bufs;
++ swa = (struct dpaa2_eth_swa *)sgt_buf;
++ swa->skb = skb;
++ swa->scl = scl;
++ swa->num_sg = num_sg;
++ swa->num_dma_bufs = num_dma_bufs;
+
++ /* Hardware expects the SG table to be in little endian format */
+ for (j = 0; j < i; j++)
+ dpaa2_sg_cpu_to_le(&sgt[j]);
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+- netdev_err(priv->net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+@@ -484,7 +458,7 @@ static int dpaa2_eth_build_sg_fd(struct
+ dpaa2_fd_set_len(fd, skb->len);
+
+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+- DPAA2_FD_CTRL_PTV1;
++ DPAA2_FD_CTRL_PTV1;
+
+ return 0;
+
+@@ -497,9 +471,10 @@ dma_map_sg_failed:
+ return err;
+ }
+
+-static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+- struct sk_buff *skb,
+- struct dpaa2_fd *fd)
++/* Create a frame descriptor based on a linear skb */
++static int build_single_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start;
+@@ -524,14 +499,11 @@ static int dpaa2_eth_build_single_fd(str
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+- addr = dma_map_single(dev,
+- buffer_start,
++ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev, addr))) {
+- dev_err(dev, "dma_map_single() failed\n");
+- return -EINVAL;
+- }
++ if (unlikely(dma_mapping_error(dev, addr)))
++ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+@@ -539,21 +511,23 @@ static int dpaa2_eth_build_single_fd(str
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+- DPAA2_FD_CTRL_PTV1;
++ DPAA2_FD_CTRL_PTV1;
+
+ return 0;
+ }
+
+-/* DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
++/* FD freeing routine on the Tx path
++ *
++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ * Optionally, return the frame annotation status word (FAS), which needs
+ * to be checked if we're on the confirmation path.
+ */
+-static void dpaa2_eth_free_fd(const struct dpaa2_eth_priv *priv,
+- const struct dpaa2_fd *fd,
+- u32 *status)
++static void free_tx_fd(const struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ u32 *status)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr;
+@@ -562,7 +536,7 @@ static void dpaa2_eth_free_fd(const stru
+ int unmap_size;
+ struct scatterlist *scl;
+ int num_sg, num_dma_bufs;
+- struct dpaa2_eth_swa *bps;
++ struct dpaa2_eth_swa *swa;
+ bool fd_single;
+ struct dpaa2_fas *fas;
+
+@@ -580,11 +554,11 @@ static void dpaa2_eth_free_fd(const stru
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+ } else {
+- bps = (struct dpaa2_eth_swa *)skbh;
+- skb = bps->skb;
+- scl = bps->scl;
+- num_sg = bps->num_sg;
+- num_dma_bufs = bps->num_dma_bufs;
++ swa = (struct dpaa2_eth_swa *)skbh;
++ skb = swa->skb;
++ scl = swa->scl;
++ num_sg = swa->num_sg;
++ num_dma_bufs = swa->num_dma_bufs;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+@@ -596,6 +570,7 @@ static void dpaa2_eth_free_fd(const stru
+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
+ }
+
++ /* Get the timestamp value */
+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns;
+@@ -610,8 +585,9 @@ static void dpaa2_eth_free_fd(const stru
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+- /* Check the status from the Frame Annotation after we unmap the first
+- * buffer but before we free it.
++ /* Read the status from the Frame Annotation after we unmap the first
++ * buffer but before we free it. The caller function is responsible
++ * for checking the status value.
+ */
+ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) {
+ fas = (struct dpaa2_fas *)
+@@ -632,24 +608,16 @@ static int dpaa2_eth_tx(struct sk_buff *
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_fd fd;
+ struct rtnl_link_stats64 *percpu_stats;
+- struct dpaa2_eth_stats *percpu_extras;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ u16 queue_mapping, flow_id;
+ int err, i;
+- /* TxConf FQ selection primarily based on cpu affinity; this is
+- * non-migratable context, so it's safe to call smp_processor_id().
+- */
+- u16 queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+- /* Setup the FD fields */
+- memset(&fd, 0, sizeof(fd));
+-
+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+ struct sk_buff *ns;
+
+- dev_info_once(net_dev->dev.parent,
+- "skb headroom too small, must realloc.\n");
+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+ if (unlikely(!ns)) {
+ percpu_stats->tx_dropped++;
+@@ -664,18 +632,20 @@ static int dpaa2_eth_tx(struct sk_buff *
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+- netdev_err(net_dev, "Out of memory for skb_unshare()");
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
++ /* Setup the FD fields */
++ memset(&fd, 0, sizeof(fd));
++
+ if (skb_is_nonlinear(skb)) {
+- err = dpaa2_eth_build_sg_fd(priv, skb, &fd);
++ err = build_sg_fd(priv, skb, &fd);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ } else {
+- err = dpaa2_eth_build_single_fd(priv, skb, &fd);
++ err = build_single_fd(priv, skb, &fd);
+ }
+
+ if (unlikely(err)) {
+@@ -686,19 +656,22 @@ static int dpaa2_eth_tx(struct sk_buff *
+ /* Tracing point */
+ trace_dpaa2_tx_fd(net_dev, &fd);
+
++ /* TxConf FQ selection primarily based on cpu affinity; this is
++ * non-migratable context, so it's safe to call smp_processor_id().
++ */
++ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders;
++ flow_id = priv->fq[queue_mapping].flowid;
+ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) {
+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+- priv->fq[queue_mapping].flowid,
+- &fd);
++ flow_id, &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ percpu_extras->tx_portal_busy += i;
+ if (unlikely(err < 0)) {
+- netdev_dbg(net_dev, "error enqueueing Tx frame\n");
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+- dpaa2_eth_free_fd(priv, &fd, NULL);
++ free_tx_fd(priv, &fd, NULL);
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += skb->len;
+@@ -713,13 +686,14 @@ err_alloc_headroom:
+ return NETDEV_TX_OK;
+ }
+
++/* Tx confirmation frame processing routine */
+ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi __always_unused)
+ {
+ struct rtnl_link_stats64 *percpu_stats;
+- struct dpaa2_eth_stats *percpu_extras;
++ struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 status = 0;
+
+ /* Tracing point */
+@@ -729,18 +703,16 @@ static void dpaa2_eth_tx_conf(struct dpa
+ percpu_extras->tx_conf_frames++;
+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+
+- dpaa2_eth_free_fd(priv, fd, &status);
++ free_tx_fd(priv, fd, &status);
+
+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
+- netdev_err(priv->net_dev, "TxConf frame error(s): 0x%08x\n",
+- status & DPAA2_ETH_TXCONF_ERR_MASK);
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+ }
+ }
+
+-static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+ {
+ int err;
+
+@@ -763,7 +735,7 @@ static int dpaa2_eth_set_rx_csum(struct
+ return 0;
+ }
+
+-static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+ {
+ struct dpaa2_eth_fq *fq;
+ struct dpni_tx_flow_cfg tx_flow_cfg;
+@@ -793,37 +765,38 @@ static int dpaa2_eth_set_tx_csum(struct
+ return 0;
+ }
+
+-static int dpaa2_bp_add_7(struct dpaa2_eth_priv *priv, u16 bpid)
++/* Perform a single release command to add buffers
++ * to the specified buffer pool
++ */
++static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+- u64 buf_array[7];
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *buf;
+ dma_addr_t addr;
+ int i;
+
+- for (i = 0; i < 7; i++) {
++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Allocate buffer visible to WRIOP + skb shared info +
+ * alignment padding
+ */
+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+- if (unlikely(!buf)) {
+- dev_err(dev, "buffer allocation failed\n");
++ if (unlikely(!buf))
+ goto err_alloc;
+- }
++
+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+
+- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUFFER_SIZE,
++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(dev, addr))) {
+- dev_err(dev, "dma_map_single() failed\n");
++ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+- }
++
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ buf, DPAA2_ETH_BUF_RAW_SIZE,
+- addr, DPAA2_ETH_RX_BUFFER_SIZE,
++ addr, DPAA2_ETH_RX_BUF_SIZE,
+ bpid);
+ }
+
+@@ -850,59 +823,57 @@ err_alloc:
+ return 0;
+ }
+
+-static int dpaa2_dpbp_seed(struct dpaa2_eth_priv *priv, u16 bpid)
++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+ {
+ int i, j;
+ int new_count;
+
+ /* This is the lazy seeding of Rx buffer pools.
+- * dpaa2_bp_add_7() is also used on the Rx hotpath and calls
++ * add_bufs() is also used on the Rx hotpath and calls
+ * napi_alloc_frag(). The trouble with that is that it in turn ends up
+ * calling this_cpu_ptr(), which mandates execution in atomic context.
+ * Rather than splitting up the code, do a one-off preempt disable.
+ */
+ preempt_disable();
+ for (j = 0; j < priv->num_channels; j++) {
+- for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += 7) {
+- new_count = dpaa2_bp_add_7(priv, bpid);
++ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
++ i += DPAA2_ETH_BUFS_PER_CMD) {
++ new_count = add_bufs(priv, bpid);
+ priv->channel[j]->buf_count += new_count;
+
+- if (new_count < 7) {
++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+ preempt_enable();
+- goto out_of_memory;
++ return -ENOMEM;
+ }
+ }
+ }
+ preempt_enable();
+
+ return 0;
+-
+-out_of_memory:
+- return -ENOMEM;
+ }
+
+ /**
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+- * @count must not exceeed 7
++ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+-static void dpaa2_dpbp_drain_cnt(struct dpaa2_eth_priv *priv, int count)
++static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+- u64 buf_array[7];
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *vaddr;
+ int ret, i;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
+- buf_array, count);
++ buf_array, count);
+ if (ret < 0) {
+- pr_err("dpaa2_io_service_acquire() failed\n");
++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+ return;
+ }
+ for (i = 0; i < ret; i++) {
+ /* Same logic as on regular Rx path */
+ dma_unmap_single(dev, buf_array[i],
+- DPAA2_ETH_RX_BUFFER_SIZE,
++ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(buf_array[i]);
+ put_page(virt_to_head_page(vaddr));
+@@ -910,12 +881,12 @@ static void dpaa2_dpbp_drain_cnt(struct
+ } while (ret);
+ }
+
+-static void __dpaa2_dpbp_free(struct dpaa2_eth_priv *priv)
++static void drain_pool(struct dpaa2_eth_priv *priv)
+ {
+ int i;
+
+- dpaa2_dpbp_drain_cnt(priv, 7);
+- dpaa2_dpbp_drain_cnt(priv, 1);
++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
++ drain_bufs(priv, 1);
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->buf_count = 0;
+@@ -924,50 +895,55 @@ static void __dpaa2_dpbp_free(struct dpa
+ /* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+-static int dpaa2_dpbp_refill(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_channel *ch,
+- u16 bpid)
++static int refill_pool(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
+ {
+ int new_count;
+- int err = 0;
+
+- if (unlikely(ch->buf_count < DPAA2_ETH_REFILL_THRESH)) {
+- do {
+- new_count = dpaa2_bp_add_7(priv, bpid);
+- if (unlikely(!new_count)) {
+- /* Out of memory; abort for now, we'll
+- * try later on
+- */
+- break;
+- }
+- ch->buf_count += new_count;
+- } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
++ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
++ return 0;
+
+- if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+- err = -ENOMEM;
+- }
++ do {
++ new_count = add_bufs(priv, bpid);
++ if (unlikely(!new_count)) {
++ /* Out of memory; abort for now, we'll try later on */
++ break;
++ }
++ ch->buf_count += new_count;
++ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+- return err;
++ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
++ return -ENOMEM;
++
++ return 0;
+ }
+
+-static int __dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
++static int pull_channel(struct dpaa2_eth_channel *ch)
+ {
+ int err;
+ int dequeues = -1;
+- struct dpaa2_eth_priv *priv = ch->priv;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+ dequeues++;
++ cpu_relax();
+ } while (err == -EBUSY);
+- if (unlikely(err))
+- netdev_err(priv->net_dev, "dpaa2_io_service_pull err %d", err);
+
+ ch->stats.dequeue_portal_busy += dequeues;
++ if (unlikely(err))
++ ch->stats.pull_err++;
++
+ return err;
+ }
+
++/* NAPI poll routine
++ *
++ * Frames are dequeued from the QMan channel associated with this NAPI context.
++ * Rx, Tx confirmation and (if configured) Rx error frames all count
++ * towards the NAPI budget.
++ */
+ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+ {
+ struct dpaa2_eth_channel *ch;
+@@ -978,32 +954,32 @@ static int dpaa2_eth_poll(struct napi_st
+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ priv = ch->priv;
+
+- __dpaa2_eth_pull_channel(ch);
++ while (cleaned < budget) {
++ err = pull_channel(ch);
++ if (unlikely(err))
++ break;
+
+- do {
+ /* Refill pool if appropriate */
+- dpaa2_dpbp_refill(priv, ch, priv->dpbp_attrs.bpid);
++ refill_pool(priv, ch, priv->dpbp_attrs.bpid);
+
+- store_cleaned = dpaa2_eth_store_consume(ch);
++ store_cleaned = consume_frames(ch);
+ cleaned += store_cleaned;
+
++ /* If we have enough budget left for a full store,
++ * try a new pull dequeue, otherwise we're done here
++ */
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_ETH_STORE_SIZE)
+ break;
+-
+- /* Try to dequeue some more */
+- err = __dpaa2_eth_pull_channel(ch);
+- if (unlikely(err))
+- break;
+- } while (1);
++ }
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+- err = dpaa2_io_service_rearm(NULL, &ch->nctx);
+- if (unlikely(err))
+- netdev_err(priv->net_dev,
+- "Notif rearm failed for channel %d\n",
+- ch->ch_id);
++ /* Re-enable data available notifications */
++ do {
++ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
++ cpu_relax();
++ } while (err == -EBUSY);
+ }
+
+ ch->stats.frames += cleaned;
+@@ -1011,7 +987,7 @@ static int dpaa2_eth_poll(struct napi_st
+ return cleaned;
+ }
+
+-static void dpaa2_eth_napi_enable(struct dpaa2_eth_priv *priv)
++static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+ {
+ struct dpaa2_eth_channel *ch;
+ int i;
+@@ -1022,7 +998,7 @@ static void dpaa2_eth_napi_enable(struct
+ }
+ }
+
+-static void dpaa2_eth_napi_disable(struct dpaa2_eth_priv *priv)
++static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+ {
+ struct dpaa2_eth_channel *ch;
+ int i;
+@@ -1033,7 +1009,7 @@ static void dpaa2_eth_napi_disable(struc
+ }
+ }
+
+-static int dpaa2_link_state_update(struct dpaa2_eth_priv *priv)
++static int link_state_update(struct dpaa2_eth_priv *priv)
+ {
+ struct dpni_link_state state;
+ int err;
+@@ -1069,7 +1045,7 @@ static int dpaa2_eth_open(struct net_dev
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+- err = dpaa2_dpbp_seed(priv, priv->dpbp_attrs.bpid);
++ err = seed_pool(priv, priv->dpbp_attrs.bpid);
+ if (err) {
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+@@ -1084,7 +1060,7 @@ static int dpaa2_eth_open(struct net_dev
+ * immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+- dpaa2_eth_napi_enable(priv);
++ enable_ch_napi(priv);
+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
+ * return true and cause 'ip link show' to report the LOWER_UP flag,
+ * even though the link notification wasn't even received.
+@@ -1093,16 +1069,16 @@ static int dpaa2_eth_open(struct net_dev
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
+- dev_err(net_dev->dev.parent, "dpni_enable() failed\n");
++ netdev_err(net_dev, "dpni_enable() failed\n");
+ goto enable_err;
+ }
+
+ /* If the DPMAC object has already processed the link up interrupt,
+ * we have to learn the link state ourselves.
+ */
+- err = dpaa2_link_state_update(priv);
++ err = link_state_update(priv);
+ if (err < 0) {
+- dev_err(net_dev->dev.parent, "Can't update link state\n");
++ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+
+@@ -1110,26 +1086,84 @@ static int dpaa2_eth_open(struct net_dev
+
+ link_state_err:
+ enable_err:
+- dpaa2_eth_napi_disable(priv);
+- __dpaa2_dpbp_free(priv);
++ disable_ch_napi(priv);
++ drain_pool(priv);
+ return err;
+ }
+
++/* The DPIO store must be empty when we call this,
++ * at the end of every NAPI cycle.
++ */
++static u32 drain_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch)
++{
++ u32 drained = 0, total = 0;
++
++ do {
++ pull_channel(ch);
++ drained = consume_frames(ch);
++ total += drained;
++ } while (drained);
++
++ return total;
++}
++
++static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *ch;
++ int i;
++ u32 drained = 0;
++
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ drained += drain_channel(priv, ch);
++ }
++
++ return drained;
++}
++
+ static int dpaa2_eth_stop(struct net_device *net_dev)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int dpni_enabled;
++ int retries = 10;
++ u32 drained;
+
+- /* Stop Tx and Rx traffic */
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+- dpni_disable(priv->mc_io, 0, priv->mc_token);
+
+- msleep(500);
++ /* Loop while dpni_disable() attempts to drain the egress FQs
++ * and confirm them back to us.
++ */
++ do {
++ dpni_disable(priv->mc_io, 0, priv->mc_token);
++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
++ if (dpni_enabled)
++ /* Allow the MC some slack */
++ msleep(100);
++ } while (dpni_enabled && --retries);
++ if (!retries) {
++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
++ /* Must go on and disable NAPI nonetheless, so we don't crash at
++ * the next "ifconfig up"
++ */
++ }
+
+- dpaa2_eth_napi_disable(priv);
+- msleep(100);
++ /* Wait for NAPI to complete on every core and disable it.
++ * In particular, this will also prevent NAPI from being rescheduled if
++ * a new CDAN is serviced, effectively discarding the CDAN. We therefore
++ * don't even need to disarm the channels, except perhaps for the case
++ * of a huge coalescing value.
++ */
++ disable_ch_napi(priv);
++
++ /* Manually drain the Rx and TxConf queues */
++ drained = drain_ingress_frames(priv);
++ if (drained)
++ netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+
+- __dpaa2_dpbp_free(priv);
++ /* Empty the buffer pool */
++ drain_pool(priv);
+
+ return 0;
+ }
+@@ -1138,7 +1172,7 @@ static int dpaa2_eth_init(struct net_dev
+ {
+ u64 supported = 0;
+ u64 not_supported = 0;
+- const struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+
+ /* Capabilities listing */
+@@ -1230,7 +1264,7 @@ static int dpaa2_eth_change_mtu(struct n
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
+ if (err) {
+- netdev_err(net_dev, "dpni_set_mfl() failed\n");
++ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
+ return err;
+ }
+
+@@ -1238,18 +1272,11 @@ static int dpaa2_eth_change_mtu(struct n
+ return 0;
+ }
+
+-/* Convenience macro to make code littered with error checking more readable */
+-#define DPAA2_ETH_WARN_IF_ERR(err, netdevp, format, ...) \
+-do { \
+- if (err) \
+- netdev_warn(netdevp, format, ##__VA_ARGS__); \
+-} while (0)
+-
+ /* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+-static void _dpaa2_eth_hw_add_uc_addr(const struct net_device *net_dev,
+- struct dpaa2_eth_priv *priv)
++static void add_uc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
+ {
+ struct netdev_hw_addr *ha;
+ int err;
+@@ -1257,17 +1284,18 @@ static void _dpaa2_eth_hw_add_uc_addr(co
+ netdev_for_each_uc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+- DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev,
+- "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+- ha->addr, err);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
+ }
+ }
+
+ /* Copy mac multicast addresses from @net_dev to @priv
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+-static void _dpaa2_eth_hw_add_mc_addr(const struct net_device *net_dev,
+- struct dpaa2_eth_priv *priv)
++static void add_mc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
+ {
+ struct netdev_hw_addr *ha;
+ int err;
+@@ -1275,9 +1303,10 @@ static void _dpaa2_eth_hw_add_mc_addr(co
+ netdev_for_each_mc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+- DPAA2_ETH_WARN_IF_ERR(err, priv->net_dev,
+- "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+- ha->addr, err);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
+ }
+ }
+
+@@ -1296,11 +1325,11 @@ static void dpaa2_eth_set_rx_mode(struct
+ /* Basic sanity checks; these probably indicate a misconfiguration */
+ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0)
+ netdev_info(net_dev,
+- "max_unicast_filters=%d, you must have DPNI_OPT_UNICAST_FILTER in the DPL\n",
++ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n",
+ max_uc);
+ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0)
+ netdev_info(net_dev,
+- "max_multicast_filters=%d, you must have DPNI_OPT_MULTICAST_FILTER in the DPL\n",
++ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n",
+ max_mc);
+
+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
+@@ -1318,9 +1347,9 @@ static void dpaa2_eth_set_rx_mode(struct
+ }
+
+ /* Adjust promisc settings due to flag combinations */
+- if (net_dev->flags & IFF_PROMISC) {
++ if (net_dev->flags & IFF_PROMISC)
+ goto force_promisc;
+- } else if (net_dev->flags & IFF_ALLMULTI) {
++ if (net_dev->flags & IFF_ALLMULTI) {
+ /* First, rebuild unicast filtering table. This should be done
+ * in promisc mode, in order to avoid frame loss while we
+ * progressively add entries to the table.
+@@ -1329,16 +1358,19 @@ static void dpaa2_eth_set_rx_mode(struct
+ * nonetheless.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc\n");
+
+ /* Actual uc table reconstruction. */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc filters\n");
+- _dpaa2_eth_hw_add_uc_addr(net_dev, priv);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc filters\n");
++ add_uc_hw_addr(net_dev, priv);
+
+ /* Finally, clear uc promisc and set mc promisc as requested. */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear uc promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc promisc\n");
+ goto force_mc_promisc;
+ }
+
+@@ -1346,32 +1378,39 @@ static void dpaa2_eth_set_rx_mode(struct
+ * For now, rebuild mac filtering tables while forcing both of them on.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set uc promisc (%d)\n", err);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mc promisc (%d)\n", err);
++ if (err)
++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+ /* Actual mac filtering tables reconstruction */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mac filters\n");
+- _dpaa2_eth_hw_add_mc_addr(net_dev, priv);
+- _dpaa2_eth_hw_add_uc_addr(net_dev, priv);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mac filters\n");
++ add_mc_hw_addr(net_dev, priv);
++ add_uc_hw_addr(net_dev, priv);
+
+ /* Now we can clear both ucast and mcast promisc, without risking
+ * to drop legitimate frames anymore.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear ucast promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't clear ucast promisc\n");
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't clear mcast promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+ return;
+
+ force_promisc:
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set ucast promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't set ucast promisc\n");
+ force_mc_promisc:
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+- DPAA2_ETH_WARN_IF_ERR(err, net_dev, "Can't set mcast promisc\n");
++ if (err)
++ netdev_warn(net_dev, "Can't set mcast promisc\n");
+ }
+
+ static int dpaa2_eth_set_features(struct net_device *net_dev,
+@@ -1379,20 +1418,19 @@ static int dpaa2_eth_set_features(struct
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ netdev_features_t changed = features ^ net_dev->features;
++ bool enable;
+ int err;
+
+ if (changed & NETIF_F_RXCSUM) {
+- bool enable = !!(features & NETIF_F_RXCSUM);
+-
+- err = dpaa2_eth_set_rx_csum(priv, enable);
++ enable = !!(features & NETIF_F_RXCSUM);
++ err = set_rx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+- bool enable = !!(features &
+- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+- err = dpaa2_eth_set_tx_csum(priv, enable);
++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
++ err = set_tx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+@@ -1419,9 +1457,9 @@ static int dpaa2_eth_ts_ioctl(struct net
+ return -ERANGE;
+ }
+
+- if (config.rx_filter == HWTSTAMP_FILTER_NONE)
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ priv->ts_rx_en = false;
+- else {
++ } else {
+ priv->ts_rx_en = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+@@ -1435,8 +1473,8 @@ static int dpaa2_eth_ioctl(struct net_de
+ {
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+- else
+- return -EINVAL;
++
++ return -EINVAL;
+ }
+
+ static const struct net_device_ops dpaa2_eth_ops = {
+@@ -1452,7 +1490,7 @@ static const struct net_device_ops dpaa2
+ .ndo_do_ioctl = dpaa2_eth_ioctl,
+ };
+
+-static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+ {
+ struct dpaa2_eth_channel *ch;
+
+@@ -1464,37 +1502,9 @@ static void dpaa2_eth_cdan_cb(struct dpa
+ napi_schedule_irqoff(&ch->napi);
+ }
+
+-static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
+-{
+- int i;
+-
+- /* We have one TxConf FQ per Tx flow */
+- for (i = 0; i < priv->dpni_attrs.max_senders; i++) {
+- priv->fq[priv->num_fqs].netdev_priv = priv;
+- priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+- priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+- priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID;
+- }
+-
+- /* The number of Rx queues (Rx distribution width) may be different from
+- * the number of cores.
+- * We only support one traffic class for now.
+- */
+- for (i = 0; i < dpaa2_queue_count(priv); i++) {
+- priv->fq[priv->num_fqs].netdev_priv = priv;
+- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+- priv->fq[priv->num_fqs++].flowid = (u16)i;
+- }
+-
+-#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+- /* We have exactly one Rx error queue per DPNI */
+- priv->fq[priv->num_fqs].netdev_priv = priv;
+- priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+- priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+-#endif
+-}
+-
++/* Verify that the FLIB API version of various MC objects is supported
++ * by our driver
++ */
+ static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version)
+ {
+ char *name = ls_dev->obj_desc.type;
+@@ -1517,8 +1527,7 @@ static int check_obj_version(struct fsl_
+
+ /* Check that the FLIB-defined version matches the one reported by MC */
+ if (mc_version != flib_version) {
+- dev_err(dev,
+- "%s FLIB version mismatch: MC reports %d, we have %d\n",
++ dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n",
+ name, mc_version, flib_version);
+ return -EINVAL;
+ }
+@@ -1534,7 +1543,8 @@ static int check_obj_version(struct fsl_
+ return 0;
+ }
+
+-static struct fsl_mc_device *dpaa2_dpcon_setup(struct dpaa2_eth_priv *priv)
++/* Allocate and configure a DPCON object */
++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+ {
+ struct fsl_mc_device *dpcon;
+ struct device *dev = priv->net_dev->dev.parent;
+@@ -1582,8 +1592,8 @@ err_open:
+ return NULL;
+ }
+
+-static void dpaa2_dpcon_free(struct dpaa2_eth_priv *priv,
+- struct fsl_mc_device *dpcon)
++static void free_dpcon(struct dpaa2_eth_priv *priv,
++ struct fsl_mc_device *dpcon)
+ {
+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+@@ -1591,7 +1601,7 @@ static void dpaa2_dpcon_free(struct dpaa
+ }
+
+ static struct dpaa2_eth_channel *
+-dpaa2_alloc_channel(struct dpaa2_eth_priv *priv)
++alloc_channel(struct dpaa2_eth_priv *priv)
+ {
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_attr attr;
+@@ -1599,12 +1609,10 @@ dpaa2_alloc_channel(struct dpaa2_eth_pri
+ int err;
+
+ channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
+- if (!channel) {
+- dev_err(dev, "Memory allocation failed\n");
++ if (!channel)
+ return NULL;
+- }
+
+- channel->dpcon = dpaa2_dpcon_setup(priv);
++ channel->dpcon = setup_dpcon(priv);
+ if (!channel->dpcon)
+ goto err_setup;
+
+@@ -1622,20 +1630,23 @@ dpaa2_alloc_channel(struct dpaa2_eth_pri
+ return channel;
+
+ err_get_attr:
+- dpaa2_dpcon_free(priv, channel->dpcon);
++ free_dpcon(priv, channel->dpcon);
+ err_setup:
+ kfree(channel);
+ return NULL;
+ }
+
+-static void dpaa2_free_channel(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_channel *channel)
++static void free_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *channel)
+ {
+- dpaa2_dpcon_free(priv, channel->dpcon);
++ free_dpcon(priv, channel->dpcon);
+ kfree(channel);
+ }
+
+-static int dpaa2_dpio_setup(struct dpaa2_eth_priv *priv)
++/* DPIO setup: allocate and configure QBMan channels, setup core affinity
++ * and register data availability notifications
++ */
++static int setup_dpio(struct dpaa2_eth_priv *priv)
+ {
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_eth_channel *channel;
+@@ -1652,7 +1663,7 @@ static int dpaa2_dpio_setup(struct dpaa2
+ cpumask_clear(&priv->dpio_cpumask);
+ for_each_online_cpu(i) {
+ /* Try to allocate a channel */
+- channel = dpaa2_alloc_channel(priv);
++ channel = alloc_channel(priv);
+ if (!channel)
+ goto err_alloc_ch;
+
+@@ -1660,7 +1671,7 @@ static int dpaa2_dpio_setup(struct dpaa2
+
+ nctx = &channel->nctx;
+ nctx->is_cdan = 1;
+- nctx->cb = dpaa2_eth_cdan_cb;
++ nctx->cb = cdan_cb;
+ nctx->id = channel->ch_id;
+ nctx->desired_cpu = i;
+
+@@ -1671,7 +1682,7 @@ static int dpaa2_dpio_setup(struct dpaa2
+ /* This core doesn't have an affine DPIO, but there's
+ * a chance another one does, so keep trying
+ */
+- dpaa2_free_channel(priv, channel);
++ free_channel(priv, channel);
+ continue;
+ }
+
+@@ -1693,7 +1704,7 @@ static int dpaa2_dpio_setup(struct dpaa2
+ cpumask_set_cpu(i, &priv->dpio_cpumask);
+ priv->num_channels++;
+
+- if (priv->num_channels == dpaa2_max_channels(priv))
++ if (priv->num_channels == dpaa2_eth_max_channels(priv))
+ break;
+ }
+
+@@ -1706,7 +1717,7 @@ static int dpaa2_dpio_setup(struct dpaa2
+
+ err_set_cdan:
+ dpaa2_io_service_deregister(NULL, nctx);
+- dpaa2_free_channel(priv, channel);
++ free_channel(priv, channel);
+ err_alloc_ch:
+ if (cpumask_empty(&priv->dpio_cpumask)) {
+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+@@ -1717,7 +1728,7 @@ err_alloc_ch:
+ return 0;
+ }
+
+-static void dpaa2_dpio_free(struct dpaa2_eth_priv *priv)
++static void free_dpio(struct dpaa2_eth_priv *priv)
+ {
+ int i;
+ struct dpaa2_eth_channel *ch;
+@@ -1726,12 +1737,12 @@ static void dpaa2_dpio_free(struct dpaa2
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ dpaa2_io_service_deregister(NULL, &ch->nctx);
+- dpaa2_free_channel(priv, ch);
++ free_channel(priv, ch);
+ }
+ }
+
+-static struct dpaa2_eth_channel *
+-dpaa2_get_channel_by_cpu(struct dpaa2_eth_priv *priv, int cpu)
++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
++ int cpu)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ int i;
+@@ -1748,11 +1759,11 @@ dpaa2_get_channel_by_cpu(struct dpaa2_et
+ return priv->channel[0];
+ }
+
+-static void dpaa2_set_fq_affinity(struct dpaa2_eth_priv *priv)
++static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_fq *fq;
+- int rx_cpu, txconf_cpu;
++ int rx_cpu, txc_cpu;
+ int i;
+
+ /* For each FQ, pick one channel/CPU to deliver frames to.
+@@ -1760,7 +1771,7 @@ static void dpaa2_set_fq_affinity(struct
+ * through direct user intervention.
+ */
+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
+- txconf_cpu = cpumask_first(&priv->txconf_cpumask);
++ txc_cpu = cpumask_first(&priv->txconf_cpumask);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+@@ -1768,20 +1779,56 @@ static void dpaa2_set_fq_affinity(struct
+ case DPAA2_RX_FQ:
+ case DPAA2_RX_ERR_FQ:
+ fq->target_cpu = rx_cpu;
+- cpumask_rr(rx_cpu, &priv->dpio_cpumask);
++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
++ if (rx_cpu >= nr_cpu_ids)
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ case DPAA2_TX_CONF_FQ:
+- fq->target_cpu = txconf_cpu;
+- cpumask_rr(txconf_cpu, &priv->txconf_cpumask);
++ fq->target_cpu = txc_cpu;
++ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
++ if (txc_cpu >= nr_cpu_ids)
++ txc_cpu = cpumask_first(&priv->txconf_cpumask);
+ break;
+ default:
+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+ }
+- fq->channel = dpaa2_get_channel_by_cpu(priv, fq->target_cpu);
++ fq->channel = get_affine_channel(priv, fq->target_cpu);
++ }
++}
++
++static void setup_fqs(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ /* We have one TxConf FQ per Tx flow */
++ for (i = 0; i < priv->dpni_attrs.max_senders; i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
++ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID;
++ }
++
++ /* The number of Rx queues (Rx distribution width) may be different from
++ * the number of cores.
++ * We only support one traffic class for now.
++ */
++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
++
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ /* We have exactly one Rx error queue per DPNI */
++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
++#endif
++
++ /* For each FQ, decide on which core to process incoming frames */
++ set_fq_affinity(priv);
+ }
+
+-static int dpaa2_dpbp_setup(struct dpaa2_eth_priv *priv)
++/* Allocate and configure one buffer pool for each interface */
++static int setup_dpbp(struct dpaa2_eth_priv *priv)
+ {
+ int err;
+ struct fsl_mc_device *dpbp_dev;
+@@ -1833,15 +1880,16 @@ err_open:
+ return err;
+ }
+
+-static void dpaa2_dpbp_free(struct dpaa2_eth_priv *priv)
++static void free_dpbp(struct dpaa2_eth_priv *priv)
+ {
+- __dpaa2_dpbp_free(priv);
++ drain_pool(priv);
+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ fsl_mc_object_free(priv->dpbp_dev);
+ }
+
+-static int dpaa2_dpni_setup(struct fsl_mc_device *ls_dev)
++/* Configure the DPNI object this interface is associated with */
++static int setup_dpni(struct fsl_mc_device *ls_dev)
+ {
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+@@ -1854,7 +1902,7 @@ static int dpaa2_dpni_setup(struct fsl_m
+
+ priv->dpni_id = ls_dev->obj_desc.id;
+
+- /* and get a handle for the DPNI this interface is associate with */
++ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+@@ -1864,7 +1912,10 @@ static int dpaa2_dpni_setup(struct fsl_m
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+- dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL);
++ /* Map a memory region which will be used by MC to pass us an
++ * attribute structure
++ */
++ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!dma_mem)
+ goto err_alloc;
+
+@@ -1878,10 +1929,15 @@ static int dpaa2_dpni_setup(struct fsl_m
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
++
++ /* We'll check the return code after unmapping, as we need to
++ * do this anyway
++ */
++ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova,
++ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE);
++
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+- dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova,
+- DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE);
+ goto err_get_attr;
+ }
+
+@@ -1889,9 +1945,6 @@ static int dpaa2_dpni_setup(struct fsl_m
+ if (err)
+ goto err_dpni_ver;
+
+- dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova,
+- DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE);
+-
+ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg));
+ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem);
+ if (err) {
+@@ -1909,15 +1962,15 @@ static int dpaa2_dpni_setup(struct fsl_m
+ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+ /* HW erratum mandates data alignment in multiples of 256 */
+ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
+- /* ...rx, ... */
++
++ /* rx buffer */
+ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ &priv->buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_buffer_layout() failed");
+ goto err_buf_layout;
+ }
+- /* ... tx, ... */
+- /* remove Rx-only options */
++ /* tx buffer: remove Rx-only options */
+ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT);
+ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token,
+@@ -1926,7 +1979,7 @@ static int dpaa2_dpni_setup(struct fsl_m
+ dev_err(dev, "dpni_set_tx_buffer_layout() failed");
+ goto err_buf_layout;
+ }
+- /* ... tx-confirm. */
++ /* tx-confirm: same options as tx */
+ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ priv->buf_layout.pass_timestamp = 1;
+@@ -1946,8 +1999,9 @@ static int dpaa2_dpni_setup(struct fsl_m
+ goto err_data_offset;
+ }
+
+- /* Warn in case TX data offset is not multiple of 64 bytes. */
+- WARN_ON(priv->tx_data_offset % 64);
++ if ((priv->tx_data_offset % 64) != 0)
++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
++ priv->tx_data_offset);
+
+ /* Accommodate SWA space. */
+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
+@@ -1976,7 +2030,7 @@ err_open:
+ return err;
+ }
+
+-static void dpaa2_dpni_free(struct dpaa2_eth_priv *priv)
++static void free_dpni(struct dpaa2_eth_priv *priv)
+ {
+ int err;
+
+@@ -1988,8 +2042,8 @@ static void dpaa2_dpni_free(struct dpaa2
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+ }
+
+-static int dpaa2_rx_flow_setup(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_fq *fq)
++static int setup_rx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue_attr rx_queue_attr;
+@@ -2023,8 +2077,8 @@ static int dpaa2_rx_flow_setup(struct dp
+ return 0;
+ }
+
+-static int dpaa2_tx_flow_setup(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_fq *fq)
++static int setup_tx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+ {
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_tx_flow_cfg tx_flow_cfg;
+@@ -2070,15 +2124,16 @@ static int dpaa2_tx_flow_setup(struct dp
+ }
+
+ #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+-static int dpaa2_rx_err_setup(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_fq *fq)
++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+ {
+ struct dpni_queue_attr queue_attr;
+ struct dpni_queue_cfg queue_cfg;
+ int err;
+
+ /* Configure the Rx error queue to generate CDANs,
+- * just like the Rx queues */
++ * just like the Rx queues
++ */
+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON;
+ queue_cfg.dest_cfg.priority = 1;
+@@ -2091,7 +2146,8 @@ static int dpaa2_rx_err_setup(struct dpa
+ }
+
+ /* Get the FQID */
+- err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_attr);
++ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token,
++ &queue_attr);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n");
+ return err;
+@@ -2102,7 +2158,10 @@ static int dpaa2_rx_err_setup(struct dpa
+ }
+ #endif
+
+-static int dpaa2_dpni_bind(struct dpaa2_eth_priv *priv)
++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
++ * frame queues and channels
++ */
++static int bind_dpni(struct dpaa2_eth_priv *priv)
+ {
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+@@ -2114,20 +2173,20 @@ static int dpaa2_dpni_bind(struct dpaa2_
+ pools_params.num_dpbp = 1;
+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].backup_pool = 0;
+- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUFFER_SIZE;
++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ dev_err(dev, "dpni_set_pools() failed\n");
+ return err;
+ }
+
+- dpaa2_cls_check(net_dev);
++ check_fs_support(net_dev);
+
+ /* have the interface implicitly distribute traffic based on supported
+ * header fields
+ */
+ if (dpaa2_eth_hash_enabled(priv)) {
+- err = dpaa2_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
++ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
+ if (err)
+ return err;
+ }
+@@ -2151,14 +2210,14 @@ static int dpaa2_dpni_bind(struct dpaa2_
+ for (i = 0; i < priv->num_fqs; i++) {
+ switch (priv->fq[i].type) {
+ case DPAA2_RX_FQ:
+- err = dpaa2_rx_flow_setup(priv, &priv->fq[i]);
++ err = setup_rx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_TX_CONF_FQ:
+- err = dpaa2_tx_flow_setup(priv, &priv->fq[i]);
++ err = setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ #ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+ case DPAA2_RX_ERR_FQ:
+- err = dpaa2_rx_err_setup(priv, &priv->fq[i]);
++ err = setup_rx_err_flow(priv, &priv->fq[i]);
+ break;
+ #endif
+ default:
+@@ -2178,7 +2237,8 @@ static int dpaa2_dpni_bind(struct dpaa2_
+ return 0;
+ }
+
+-static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
++/* Allocate rings for storing incoming frame descriptors */
++static int alloc_rings(struct dpaa2_eth_priv *priv)
+ {
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+@@ -2205,7 +2265,7 @@ err_ring:
+ return -ENOMEM;
+ }
+
+-static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
++static void free_rings(struct dpaa2_eth_priv *priv)
+ {
+ int i;
+
+@@ -2213,7 +2273,7 @@ static void dpaa2_eth_free_rings(struct
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
+
+-static int dpaa2_eth_netdev_init(struct net_device *net_dev)
++static int netdev_init(struct net_device *net_dev)
+ {
+ int err;
+ struct device *dev = net_dev->dev.parent;
+@@ -2223,7 +2283,9 @@ static int dpaa2_eth_netdev_init(struct
+
+ net_dev->netdev_ops = &dpaa2_eth_ops;
+
+- /* If the DPL contains all-0 mac_addr, set a random hardware address */
++ /* If the DPNI attributes contain an all-0 mac_addr,
++ * set a random hardware address
++ */
+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ mac_addr);
+ if (err) {
+@@ -2281,14 +2343,13 @@ static int dpaa2_eth_netdev_init(struct
+ return 0;
+ }
+
+-#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+-static int dpaa2_poll_link_state(void *arg)
++static int poll_link_state(void *arg)
+ {
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+ int err;
+
+ while (!kthread_should_stop()) {
+- err = dpaa2_link_state_update(priv);
++ err = link_state_update(priv);
+ if (unlikely(err))
+ return err;
+
+@@ -2297,7 +2358,7 @@ static int dpaa2_poll_link_state(void *a
+
+ return 0;
+ }
+-#else
++
+ static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
+ {
+ return IRQ_WAKE_THREAD;
+@@ -2312,7 +2373,6 @@ static irqreturn_t dpni_irq0_handler_thr
+ struct net_device *net_dev = dev_get_drvdata(dev);
+ int err;
+
+- netdev_dbg(net_dev, "IRQ %d received\n", irq_num);
+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ irq_index, &status);
+ if (unlikely(err)) {
+@@ -2323,7 +2383,7 @@ static irqreturn_t dpni_irq0_handler_thr
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+- dpaa2_link_state_update(netdev_priv(net_dev));
++ link_state_update(netdev_priv(net_dev));
+ }
+
+ out:
+@@ -2332,17 +2392,18 @@ out:
+ return IRQ_HANDLED;
+ }
+
+-static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
++static int setup_irqs(struct fsl_mc_device *ls_dev)
+ {
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+- int irq_count = ls_dev->obj_desc.irq_count;
+ u8 irq_index = DPNI_IRQ_INDEX;
+ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED;
+
+- /* The only interrupt supported now is the link state notification. */
+- if (WARN_ON(irq_count != 1))
+- return -EINVAL;
++ err = fsl_mc_allocate_irqs(ls_dev);
++ if (err) {
++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
++ return err;
++ }
+
+ irq = ls_dev->irqs[0];
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+@@ -2352,28 +2413,34 @@ static int dpaa2_eth_setup_irqs(struct f
+ dev_name(&ls_dev->dev), &ls_dev->dev);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
+- return err;
++ goto free_mc_irq;
+ }
+
+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ irq_index, mask);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
+- return err;
++ goto free_irq;
+ }
+
+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ irq_index, 1);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
+- return err;
++ goto free_irq;
+ }
+
+ return 0;
++
++free_irq:
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
++free_mc_irq:
++ fsl_mc_free_irqs(ls_dev);
++
++ return err;
+ }
+-#endif
+
+-static void dpaa2_eth_napi_add(struct dpaa2_eth_priv *priv)
++static void add_ch_napi(struct dpaa2_eth_priv *priv)
+ {
+ int i;
+ struct dpaa2_eth_channel *ch;
+@@ -2386,7 +2453,7 @@ static void dpaa2_eth_napi_add(struct dp
+ }
+ }
+
+-static void dpaa2_eth_napi_del(struct dpaa2_eth_priv *priv)
++static void del_ch_napi(struct dpaa2_eth_priv *priv)
+ {
+ int i;
+ struct dpaa2_eth_channel *ch;
+@@ -2398,7 +2465,6 @@ static void dpaa2_eth_napi_del(struct dp
+ }
+
+ /* SysFS support */
+-
+ static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+@@ -2482,22 +2548,21 @@ static ssize_t dpaa2_eth_write_txconf_cp
+ }
+
+ /* Set the new TxConf FQ affinities */
+- dpaa2_set_fq_affinity(priv);
++ set_fq_affinity(priv);
+
+-#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
+ * link up notification is received. Give the polling thread enough time
+ * to detect the link state change, or else we'll end up with the
+ * transmission side forever shut down.
+ */
+- msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
+-#endif
++ if (priv->do_link_poll)
++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+- dpaa2_tx_flow_setup(priv, fq);
++ setup_tx_flow(priv, fq);
+ }
+
+ if (running) {
+@@ -2568,7 +2633,6 @@ static int dpaa2_eth_probe(struct fsl_mc
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+- priv->msg_enable = netif_msg_init(debug, -1);
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+@@ -2578,39 +2642,27 @@ static int dpaa2_eth_probe(struct fsl_mc
+ goto err_portal_alloc;
+ }
+
+-#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+- err = fsl_mc_allocate_irqs(dpni_dev);
+- if (err) {
+- dev_err(dev, "MC irqs allocation failed\n");
+- goto err_irqs_alloc;
+- }
+-#endif
+-
+- /* DPNI initialization */
+- err = dpaa2_dpni_setup(dpni_dev);
+- if (err < 0)
++ /* MC objects initialization and configuration */
++ err = setup_dpni(dpni_dev);
++ if (err)
+ goto err_dpni_setup;
+
+- /* DPIO */
+- err = dpaa2_dpio_setup(priv);
++ err = setup_dpio(priv);
+ if (err)
+ goto err_dpio_setup;
+
+- /* FQs */
+- dpaa2_eth_setup_fqs(priv);
+- dpaa2_set_fq_affinity(priv);
++ setup_fqs(priv);
+
+- /* DPBP */
+- err = dpaa2_dpbp_setup(priv);
++ err = setup_dpbp(priv);
+ if (err)
+ goto err_dpbp_setup;
+
+- /* DPNI binding to DPIO and DPBPs */
+- err = dpaa2_dpni_bind(priv);
++ err = bind_dpni(priv);
+ if (err)
+ goto err_bind;
+
+- dpaa2_eth_napi_add(priv);
++ /* Add a NAPI context for each channel */
++ add_ch_napi(priv);
+
+ /* Percpu statistics */
+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+@@ -2635,38 +2687,37 @@ static int dpaa2_eth_probe(struct fsl_mc
+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
+ }
+
+- err = dpaa2_eth_netdev_init(net_dev);
++ err = netdev_init(net_dev);
+ if (err)
+ goto err_netdev_init;
+
+ /* Configure checksum offload based on current interface flags */
+- err = dpaa2_eth_set_rx_csum(priv,
+- !!(net_dev->features & NETIF_F_RXCSUM));
++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ if (err)
+ goto err_csum;
+
+- err = dpaa2_eth_set_tx_csum(priv,
+- !!(net_dev->features &
+- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
++ err = set_tx_csum(priv, !!(net_dev->features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ if (err)
+ goto err_csum;
+
+- err = dpaa2_eth_alloc_rings(priv);
++ err = alloc_rings(priv);
+ if (err)
+ goto err_alloc_rings;
+
+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+
+-#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+- priv->poll_thread = kthread_run(dpaa2_poll_link_state, priv,
+- "%s_poll_link", net_dev->name);
+-#else
+- err = dpaa2_eth_setup_irqs(dpni_dev);
++ err = setup_irqs(dpni_dev);
+ if (err) {
+- netdev_err(net_dev, "ERROR %d setting up interrupts", err);
+- goto err_setup_irqs;
++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
++ priv->poll_thread = kthread_run(poll_link_state, priv,
++ "%s_poll_link", net_dev->name);
++ if (IS_ERR(priv->poll_thread)) {
++ netdev_err(net_dev, "Error starting polling thread\n");
++ goto err_poll_thread;
++ }
++ priv->do_link_poll = true;
+ }
+-#endif
+
+ dpaa2_eth_sysfs_init(&net_dev->dev);
+ dpaa2_dbg_add(priv);
+@@ -2674,10 +2725,8 @@ static int dpaa2_eth_probe(struct fsl_mc
+ dev_info(dev, "Probed interface %s\n", net_dev->name);
+ return 0;
+
+-#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+-err_setup_irqs:
+-#endif
+- dpaa2_eth_free_rings(priv);
++err_poll_thread:
++ free_rings(priv);
+ err_alloc_rings:
+ err_csum:
+ unregister_netdev(net_dev);
+@@ -2686,19 +2735,15 @@ err_netdev_init:
+ err_alloc_percpu_extras:
+ free_percpu(priv->percpu_stats);
+ err_alloc_percpu_stats:
+- dpaa2_eth_napi_del(priv);
++ del_ch_napi(priv);
+ err_bind:
+- dpaa2_dpbp_free(priv);
++ free_dpbp(priv);
+ err_dpbp_setup:
+- dpaa2_dpio_free(priv);
++ free_dpio(priv);
+ err_dpio_setup:
+ kfree(priv->cls_rule);
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+ err_dpni_setup:
+-#ifndef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+- fsl_mc_free_irqs(dpni_dev);
+-err_irqs_alloc:
+-#endif
+ fsl_mc_portal_free(priv->mc_io);
+ err_portal_alloc:
+ dev_set_drvdata(dev, NULL);
+@@ -2723,22 +2768,21 @@ static int dpaa2_eth_remove(struct fsl_m
+ unregister_netdev(net_dev);
+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+- dpaa2_dpio_free(priv);
+- dpaa2_eth_free_rings(priv);
+- dpaa2_eth_napi_del(priv);
+- dpaa2_dpbp_free(priv);
+- dpaa2_dpni_free(priv);
++ free_dpio(priv);
++ free_rings(priv);
++ del_ch_napi(priv);
++ free_dpbp(priv);
++ free_dpni(priv);
+
+ fsl_mc_portal_free(priv->mc_io);
+
+ free_percpu(priv->percpu_stats);
+ free_percpu(priv->percpu_extras);
+
+-#ifdef CONFIG_FSL_DPAA2_ETH_LINK_POLL
+- kthread_stop(priv->poll_thread);
+-#else
+- fsl_mc_free_irqs(ls_dev);
+-#endif
++ if (priv->do_link_poll)
++ kthread_stop(priv->poll_thread);
++ else
++ fsl_mc_free_irqs(ls_dev);
+
+ kfree(priv->cls_rule);
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -49,8 +49,10 @@
+
+ #define DPAA2_ETH_STORE_SIZE 16
+
+-/* Maximum receive frame size is 64K */
+-#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUFFER_SIZE)
++/* Maximum number of scatter-gather entries in an ingress frame,
++ * considering the maximum receive frame size is 64K
++ */
++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+ /* Maximum acceptable MTU value. It is in direct relation with the MC-enforced
+ * Max Frame Length (currently 10k).
+@@ -75,17 +77,26 @@
+ #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+ #define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+
++/* Maximum number of buffers that can be acquired/released through a single
++ * QBMan command
++ */
++#define DPAA2_ETH_BUFS_PER_CMD 7
++
+ /* Hardware requires alignment for ingress/egress buffer addresses
+ * and ingress buffer lengths.
+ */
+-#define DPAA2_ETH_RX_BUFFER_SIZE 2048
++#define DPAA2_ETH_RX_BUF_SIZE 2048
+ #define DPAA2_ETH_TX_BUF_ALIGN 64
+ #define DPAA2_ETH_RX_BUF_ALIGN 256
+ #define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
+
++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
++ * buffers large enough to allow building an skb around them and also account
++ * for alignment restrictions
++ */
+ #define DPAA2_ETH_BUF_RAW_SIZE \
+- (DPAA2_ETH_RX_BUFFER_SIZE + \
++ (DPAA2_ETH_RX_BUF_SIZE + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ DPAA2_ETH_RX_BUF_ALIGN)
+
+@@ -127,57 +138,56 @@ struct dpaa2_fas {
+ __le32 status;
+ } __packed;
+
++/* Error and status bits in the frame annotation status word */
+ /* Debug frame, otherwise supposed to be discarded */
+-#define DPAA2_ETH_FAS_DISC 0x80000000
++#define DPAA2_FAS_DISC 0x80000000
+ /* MACSEC frame */
+-#define DPAA2_ETH_FAS_MS 0x40000000
+-#define DPAA2_ETH_FAS_PTP 0x08000000
++#define DPAA2_FAS_MS 0x40000000
++#define DPAA2_FAS_PTP 0x08000000
+ /* Ethernet multicast frame */
+-#define DPAA2_ETH_FAS_MC 0x04000000
++#define DPAA2_FAS_MC 0x04000000
+ /* Ethernet broadcast frame */
+-#define DPAA2_ETH_FAS_BC 0x02000000
+-#define DPAA2_ETH_FAS_KSE 0x00040000
+-#define DPAA2_ETH_FAS_EOFHE 0x00020000
+-#define DPAA2_ETH_FAS_MNLE 0x00010000
+-#define DPAA2_ETH_FAS_TIDE 0x00008000
+-#define DPAA2_ETH_FAS_PIEE 0x00004000
++#define DPAA2_FAS_BC 0x02000000
++#define DPAA2_FAS_KSE 0x00040000
++#define DPAA2_FAS_EOFHE 0x00020000
++#define DPAA2_FAS_MNLE 0x00010000
++#define DPAA2_FAS_TIDE 0x00008000
++#define DPAA2_FAS_PIEE 0x00004000
+ /* Frame length error */
+-#define DPAA2_ETH_FAS_FLE 0x00002000
+-/* Frame physical error; our favourite pastime */
+-#define DPAA2_ETH_FAS_FPE 0x00001000
+-#define DPAA2_ETH_FAS_PTE 0x00000080
+-#define DPAA2_ETH_FAS_ISP 0x00000040
+-#define DPAA2_ETH_FAS_PHE 0x00000020
+-#define DPAA2_ETH_FAS_BLE 0x00000010
++#define DPAA2_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_FAS_FPE 0x00001000
++#define DPAA2_FAS_PTE 0x00000080
++#define DPAA2_FAS_ISP 0x00000040
++#define DPAA2_FAS_PHE 0x00000020
++#define DPAA2_FAS_BLE 0x00000010
+ /* L3 csum validation performed */
+-#define DPAA2_ETH_FAS_L3CV 0x00000008
++#define DPAA2_FAS_L3CV 0x00000008
+ /* L3 csum error */
+-#define DPAA2_ETH_FAS_L3CE 0x00000004
++#define DPAA2_FAS_L3CE 0x00000004
+ /* L4 csum validation performed */
+-#define DPAA2_ETH_FAS_L4CV 0x00000002
++#define DPAA2_FAS_L4CV 0x00000002
+ /* L4 csum error */
+-#define DPAA2_ETH_FAS_L4CE 0x00000001
+-/* These bits always signal errors */
+-#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \
+- DPAA2_ETH_FAS_EOFHE | \
+- DPAA2_ETH_FAS_MNLE | \
+- DPAA2_ETH_FAS_TIDE | \
+- DPAA2_ETH_FAS_PIEE | \
+- DPAA2_ETH_FAS_FLE | \
+- DPAA2_ETH_FAS_FPE | \
+- DPAA2_ETH_FAS_PTE | \
+- DPAA2_ETH_FAS_ISP | \
+- DPAA2_ETH_FAS_PHE | \
+- DPAA2_ETH_FAS_BLE | \
+- DPAA2_ETH_FAS_L3CE | \
+- DPAA2_ETH_FAS_L4CE)
+-/* Unsupported features in the ingress */
+-#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS
++#define DPAA2_FAS_L4CE 0x00000001
++/* Possible errors on the ingress path */
++#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \
++ DPAA2_FAS_EOFHE | \
++ DPAA2_FAS_MNLE | \
++ DPAA2_FAS_TIDE | \
++ DPAA2_FAS_PIEE | \
++ DPAA2_FAS_FLE | \
++ DPAA2_FAS_FPE | \
++ DPAA2_FAS_PTE | \
++ DPAA2_FAS_ISP | \
++ DPAA2_FAS_PHE | \
++ DPAA2_FAS_BLE | \
++ DPAA2_FAS_L3CE | \
++ DPAA2_FAS_L4CE)
+ /* Tx errors */
+-#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \
+- DPAA2_ETH_FAS_EOFHE | \
+- DPAA2_ETH_FAS_MNLE | \
+- DPAA2_ETH_FAS_TIDE)
++#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \
++ DPAA2_FAS_EOFHE | \
++ DPAA2_FAS_MNLE | \
++ DPAA2_FAS_TIDE)
+
+ /* Time in milliseconds between link state updates */
+ #define DPAA2_ETH_LINK_STATE_REFRESH 1000
+@@ -185,7 +195,7 @@ struct dpaa2_fas {
+ /* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
+ */
+-struct dpaa2_eth_stats {
++struct dpaa2_eth_drv_stats {
+ __u64 tx_conf_frames;
+ __u64 tx_conf_bytes;
+ __u64 tx_sg_frames;
+@@ -210,15 +220,17 @@ struct dpaa2_eth_ch_stats {
+ __u64 cdan;
+ /* Number of frames received on queues from this channel */
+ __u64 frames;
++ /* Pull errors */
++ __u64 pull_err;
+ };
+
+-/* Maximum number of Rx queues associated with a DPNI */
++/* Maximum number of queues associated with a DPNI */
+ #define DPAA2_ETH_MAX_RX_QUEUES 16
+ #define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
+ #define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
+-#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
+- DPAA2_ETH_MAX_TX_QUEUES + \
+- DPAA2_ETH_MAX_RX_ERR_QUEUES)
++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
++ DPAA2_ETH_MAX_TX_QUEUES + \
++ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+
+ #define DPAA2_ETH_MAX_DPCONS NR_CPUS
+
+@@ -241,7 +253,6 @@ struct dpaa2_eth_fq {
+ struct dpaa2_eth_channel *,
+ const struct dpaa2_fd *,
+ struct napi_struct *);
+- struct dpaa2_eth_priv *netdev_priv; /* backpointer */
+ struct dpaa2_eth_fq_stats stats;
+ };
+
+@@ -258,16 +269,16 @@ struct dpaa2_eth_channel {
+ struct dpaa2_eth_ch_stats stats;
+ };
+
+-struct dpaa2_cls_rule {
++struct dpaa2_eth_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ bool in_use;
+ };
+
++/* Driver private data */
+ struct dpaa2_eth_priv {
+ struct net_device *net_dev;
+
+ u8 num_fqs;
+- /* First queue is tx conf, the rest are rx */
+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+
+ u8 num_channels;
+@@ -299,12 +310,12 @@ struct dpaa2_eth_priv {
+ /* Standard statistics */
+ struct rtnl_link_stats64 __percpu *percpu_stats;
+ /* Extra stats, in addition to the ones known by the kernel */
+- struct dpaa2_eth_stats __percpu *percpu_extras;
+- u32 msg_enable; /* net_device message level */
++ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+
+ u16 mc_token;
+
+ struct dpni_link_state link_state;
++ bool do_link_poll;
+ struct task_struct *poll_thread;
+
+ /* enabled ethtool hashing bits */
+@@ -315,7 +326,7 @@ struct dpaa2_eth_priv {
+ #endif
+
+ /* array of classification rules */
+- struct dpaa2_cls_rule *cls_rule;
++ struct dpaa2_eth_cls_rule *cls_rule;
+
+ struct dpni_tx_shaping_cfg shaping_cfg;
+
+@@ -341,9 +352,9 @@ struct dpaa2_eth_priv {
+
+ extern const struct ethtool_ops dpaa2_ethtool_ops;
+
+-int dpaa2_set_hash(struct net_device *net_dev, u64 flags);
++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+
+-static int dpaa2_queue_count(struct dpaa2_eth_priv *priv)
++static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+ {
+ if (!dpaa2_eth_hash_enabled(priv))
+ return 1;
+@@ -351,16 +362,16 @@ static int dpaa2_queue_count(struct dpaa
+ return priv->dpni_ext_cfg.tc_cfg[0].max_dist;
+ }
+
+-static inline int dpaa2_max_channels(struct dpaa2_eth_priv *priv)
++static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv)
+ {
+ /* Ideally, we want a number of channels large enough
+ * to accommodate both the Rx distribution size
+ * and the max number of Tx confirmation queues
+ */
+- return max_t(int, dpaa2_queue_count(priv),
++ return max_t(int, dpaa2_eth_queue_count(priv),
+ priv->dpni_attrs.max_senders);
+ }
+
+-void dpaa2_cls_check(struct net_device *);
++void check_fs_support(struct net_device *);
+
+ #endif /* __DPAA2_H */
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -52,7 +52,7 @@ char dpaa2_ethtool_stats[][ETH_GSTRING_L
+
+ #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
+-/* To be kept in sync with 'struct dpaa2_eth_stats' */
++/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
+ char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
+
+@@ -63,12 +63,12 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_
+ "rx sg frames",
+ "rx sg bytes",
+ /* how many times we had to retry the enqueue command */
+- "tx portal busy",
++ "enqueue portal busy",
+
+ /* Channel stats */
+-
+ /* How many times we had to retry the volatile dequeue command */
+- "portal busy",
++ "dequeue portal busy",
++ "channel pull errors",
+ /* Number of notifications received */
+ "cdan",
+ #ifdef CONFIG_FSL_QBMAN_DEBUG
+@@ -83,8 +83,8 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_
+
+ #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
+
+-static void dpaa2_get_drvinfo(struct net_device *net_dev,
+- struct ethtool_drvinfo *drvinfo)
++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
+ {
+ struct mc_version mc_ver;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+@@ -112,20 +112,8 @@ static void dpaa2_get_drvinfo(struct net
+ sizeof(drvinfo->bus_info));
+ }
+
+-static u32 dpaa2_get_msglevel(struct net_device *net_dev)
+-{
+- return ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable;
+-}
+-
+-static void dpaa2_set_msglevel(struct net_device *net_dev,
+- u32 msg_enable)
+-{
+- ((struct dpaa2_eth_priv *)netdev_priv(net_dev))->msg_enable =
+- msg_enable;
+-}
+-
+-static int dpaa2_get_settings(struct net_device *net_dev,
+- struct ethtool_cmd *cmd)
++static int dpaa2_eth_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
+ {
+ struct dpni_link_state state = {0};
+ int err = 0;
+@@ -152,8 +140,8 @@ out:
+ return err;
+ }
+
+-static int dpaa2_set_settings(struct net_device *net_dev,
+- struct ethtool_cmd *cmd)
++static int dpaa2_eth_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *cmd)
+ {
+ struct dpni_link_cfg cfg = {0};
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+@@ -190,8 +178,8 @@ static int dpaa2_set_settings(struct net
+ return err;
+ }
+
+-static void dpaa2_get_strings(struct net_device *netdev, u32 stringset,
+- u8 *data)
++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
+ {
+ u8 *p = data;
+ int i;
+@@ -210,7 +198,7 @@ static void dpaa2_get_strings(struct net
+ }
+ }
+
+-static int dpaa2_get_sset_count(struct net_device *net_dev, int sset)
++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+ {
+ switch (sset) {
+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+@@ -222,9 +210,9 @@ static int dpaa2_get_sset_count(struct n
+
+ /** Fill in hardware counters, as returned by the MC firmware.
+ */
+-static void dpaa2_get_ethtool_stats(struct net_device *net_dev,
+- struct ethtool_stats *stats,
+- u64 *data)
++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats,
++ u64 *data)
+ {
+ int i; /* Current index in the data array */
+ int j, k, err;
+@@ -236,9 +224,9 @@ static void dpaa2_get_ethtool_stats(stru
+ u32 buf_cnt;
+ #endif
+ u64 cdan = 0;
+- u64 portal_busy = 0;
++ u64 portal_busy = 0, pull_err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+- struct dpaa2_eth_stats *extras;
++ struct dpaa2_eth_drv_stats *extras;
+ struct dpaa2_eth_ch_stats *ch_stats;
+
+ memset(data, 0,
+@@ -266,16 +254,18 @@ static void dpaa2_get_ethtool_stats(stru
+ ch_stats = &priv->channel[j]->stats;
+ cdan += ch_stats->cdan;
+ portal_busy += ch_stats->dequeue_portal_busy;
++ pull_err += ch_stats->pull_err;
+ }
+
+ *(data + i++) = portal_busy;
++ *(data + i++) = pull_err;
+ *(data + i++) = cdan;
+
+ #ifdef CONFIG_FSL_QBMAN_DEBUG
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Print FQ instantaneous counts */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+- &fcnt, &bcnt);
++ &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(net_dev, "FQ query error %d", err);
+ return;
+@@ -303,12 +293,12 @@ static void dpaa2_get_ethtool_stats(stru
+ #endif
+ }
+
+-static const struct dpaa2_hash_fields {
++static const struct dpaa2_eth_hash_fields {
+ u64 rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int size;
+-} dpaa2_hash_fields[] = {
++} hash_fields[] = {
+ {
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+@@ -353,55 +343,53 @@ static const struct dpaa2_hash_fields {
+ },
+ };
+
+-static int dpaa2_cls_is_enabled(struct net_device *net_dev, u64 flag)
++static int cls_is_enabled(struct net_device *net_dev, u64 flag)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return !!(priv->rx_hash_fields & flag);
+ }
+
+-static int dpaa2_cls_key_off(struct net_device *net_dev, u64 flag)
++static int cls_key_off(struct net_device *net_dev, u64 flag)
+ {
+ int i, off = 0;
+
+- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
+- if (dpaa2_hash_fields[i].rxnfc_field & flag)
++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
++ if (hash_fields[i].rxnfc_field & flag)
+ return off;
+- if (dpaa2_cls_is_enabled(net_dev,
+- dpaa2_hash_fields[i].rxnfc_field))
+- off += dpaa2_hash_fields[i].size;
++ if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field))
++ off += hash_fields[i].size;
+ }
+
+ return -1;
+ }
+
+-static u8 dpaa2_cls_key_size(struct net_device *net_dev)
++static u8 cls_key_size(struct net_device *net_dev)
+ {
+ u8 i, size = 0;
+
+- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
+- if (!dpaa2_cls_is_enabled(net_dev,
+- dpaa2_hash_fields[i].rxnfc_field))
++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
++ if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field))
+ continue;
+- size += dpaa2_hash_fields[i].size;
++ size += hash_fields[i].size;
+ }
+
+ return size;
+ }
+
+-static u8 dpaa2_cls_max_key_size(struct net_device *net_dev)
++static u8 cls_max_key_size(struct net_device *net_dev)
+ {
+ u8 i, size = 0;
+
+- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++)
+- size += dpaa2_hash_fields[i].size;
++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++)
++ size += hash_fields[i].size;
+
+ return size;
+ }
+
+-void dpaa2_cls_check(struct net_device *net_dev)
++void check_fs_support(struct net_device *net_dev)
+ {
+- u8 key_size = dpaa2_cls_max_key_size(net_dev);
++ u8 key_size = cls_max_key_size(net_dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS &&
+@@ -417,7 +405,7 @@ void dpaa2_cls_check(struct net_device *
+ /* Set RX hash options
+ * flags is a combination of RXH_ bits
+ */
+-int dpaa2_set_hash(struct net_device *net_dev, u64 flags)
++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+ {
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+@@ -441,11 +429,11 @@ int dpaa2_set_hash(struct net_device *ne
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+- for (i = 0; i < ARRAY_SIZE(dpaa2_hash_fields); i++) {
++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+ struct dpkg_extract *key =
+ &cls_cfg.extracts[cls_cfg.num_extracts];
+
+- if (!(flags & dpaa2_hash_fields[i].rxnfc_field))
++ if (!(flags & hash_fields[i].rxnfc_field))
+ continue;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+@@ -454,14 +442,12 @@ int dpaa2_set_hash(struct net_device *ne
+ }
+
+ key->type = DPKG_EXTRACT_FROM_HDR;
+- key->extract.from_hdr.prot =
+- dpaa2_hash_fields[i].cls_prot;
++ key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
+- key->extract.from_hdr.field =
+- dpaa2_hash_fields[i].cls_field;
++ key->extract.from_hdr.field = hash_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+
+- enabled_flags |= dpaa2_hash_fields[i].rxnfc_field;
++ enabled_flags |= hash_fields[i].rxnfc_field;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
+@@ -486,7 +472,7 @@ int dpaa2_set_hash(struct net_device *ne
+ return -ENOMEM;
+ }
+
+- dist_cfg.dist_size = dpaa2_queue_count(priv);
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ if (dpaa2_eth_fs_enabled(priv)) {
+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
+@@ -508,14 +494,14 @@ int dpaa2_set_hash(struct net_device *ne
+ return 0;
+ }
+
+-static int dpaa2_cls_prep_rule(struct net_device *net_dev,
+- struct ethtool_rx_flow_spec *fs,
+- void *key)
++static int prep_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ void *key)
+ {
+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
+ struct ethhdr *eth_h, *eth_m;
+ struct ethtool_flow_ext *ext_h, *ext_m;
+- const u8 key_size = dpaa2_cls_key_size(net_dev);
++ const u8 key_size = cls_key_size(net_dev);
+ void *msk = key + key_size;
+
+ memset(key, 0, key_size * 2);
+@@ -546,51 +532,47 @@ l4ip4:
+ "ToS is not supported for IPv4 L4\n");
+ return -EOPNOTSUPP;
+ }
+- if (l4ip4_m->ip4src &&
+- !dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) {
++ if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) {
+ netdev_err(net_dev, "IP SRC not supported!\n");
+ return -EOPNOTSUPP;
+ }
+- if (l4ip4_m->ip4dst &&
+- !dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) {
++ if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) {
+ netdev_err(net_dev, "IP DST not supported!\n");
+ return -EOPNOTSUPP;
+ }
+- if (l4ip4_m->psrc &&
+- !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
++ if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
+ netdev_err(net_dev, "PSRC not supported, ignored\n");
+ return -EOPNOTSUPP;
+ }
+- if (l4ip4_m->pdst &&
+- !dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
++ if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
+ netdev_err(net_dev, "PDST not supported, ignored\n");
+ return -EOPNOTSUPP;
+ }
+
+- if (dpaa2_cls_is_enabled(net_dev, RXH_IP_SRC)) {
+- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_SRC))
++ if (cls_is_enabled(net_dev, RXH_IP_SRC)) {
++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC))
+ = l4ip4_h->ip4src;
+- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_SRC))
++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC))
+ = l4ip4_m->ip4src;
+ }
+- if (dpaa2_cls_is_enabled(net_dev, RXH_IP_DST)) {
+- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_IP_DST))
++ if (cls_is_enabled(net_dev, RXH_IP_DST)) {
++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST))
+ = l4ip4_h->ip4dst;
+- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_IP_DST))
++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST))
+ = l4ip4_m->ip4dst;
+ }
+
+- if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
+- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1))
++ if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) {
++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1))
+ = l4ip4_h->psrc;
+- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_0_1))
++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1))
+ = l4ip4_m->psrc;
+ }
+
+- if (dpaa2_cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
+- *(u32 *)(key + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3))
++ if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) {
++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3))
+ = l4ip4_h->pdst;
+- *(u32 *)(msk + dpaa2_cls_key_off(net_dev, RXH_L4_B_2_3))
++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3))
+ = l4ip4_m->pdst;
+ }
+ break;
+@@ -609,12 +591,10 @@ l4ip4:
+ return -EOPNOTSUPP;
+ }
+
+- if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) {
+- ether_addr_copy(key
+- + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ if (cls_is_enabled(net_dev, RXH_L2DA)) {
++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA),
+ eth_h->h_dest);
+- ether_addr_copy(msk
+- + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA),
+ eth_m->h_dest);
+ } else {
+ if (!is_zero_ether_addr(eth_m->h_dest)) {
+@@ -639,12 +619,10 @@ l4ip4:
+ ext_h = &fs->h_ext;
+ ext_m = &fs->m_ext;
+
+- if (dpaa2_cls_is_enabled(net_dev, RXH_L2DA)) {
+- ether_addr_copy(key
+- + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ if (cls_is_enabled(net_dev, RXH_L2DA)) {
++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA),
+ ext_h->h_dest);
+- ether_addr_copy(msk
+- + dpaa2_cls_key_off(net_dev, RXH_L2DA),
++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA),
+ ext_m->h_dest);
+ } else {
+ if (!is_zero_ether_addr(ext_m->h_dest)) {
+@@ -657,9 +635,9 @@ l4ip4:
+ return 0;
+ }
+
+-static int dpaa2_do_cls(struct net_device *net_dev,
+- struct ethtool_rx_flow_spec *fs,
+- bool add)
++static int do_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT;
+@@ -674,19 +652,19 @@ static int dpaa2_do_cls(struct net_devic
+ }
+
+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
+- fs->ring_cookie >= dpaa2_queue_count(priv)) ||
++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
+ fs->location >= rule_cnt)
+ return -EINVAL;
+
+ memset(&rule_cfg, 0, sizeof(rule_cfg));
+- rule_cfg.key_size = dpaa2_cls_key_size(net_dev);
++ rule_cfg.key_size = cls_key_size(net_dev);
+
+ /* allocate twice the key size, for the actual key and for mask */
+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+- err = dpaa2_cls_prep_rule(net_dev, fs, dma_mem);
++ err = prep_cls_rule(net_dev, fs, dma_mem);
+ if (err)
+ goto err_free_mem;
+
+@@ -735,13 +713,13 @@ err_free_mem:
+ return err;
+ }
+
+-static int dpaa2_add_cls(struct net_device *net_dev,
+- struct ethtool_rx_flow_spec *fs)
++static int add_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+- err = dpaa2_do_cls(net_dev, fs, true);
++ err = do_cls(net_dev, fs, true);
+ if (err)
+ return err;
+
+@@ -751,12 +729,12 @@ static int dpaa2_add_cls(struct net_devi
+ return 0;
+ }
+
+-static int dpaa2_del_cls(struct net_device *net_dev, int location)
++static int del_cls(struct net_device *net_dev, int location)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+- err = dpaa2_do_cls(net_dev, &priv->cls_rule[location].fs, false);
++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
+ if (err)
+ return err;
+
+@@ -765,7 +743,7 @@ static int dpaa2_del_cls(struct net_devi
+ return 0;
+ }
+
+-static void dpaa2_clear_cls(struct net_device *net_dev)
++static void clear_cls(struct net_device *net_dev)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int i, err;
+@@ -774,7 +752,7 @@ static void dpaa2_clear_cls(struct net_d
+ if (!priv->cls_rule[i].in_use)
+ continue;
+
+- err = dpaa2_del_cls(net_dev, i);
++ err = del_cls(net_dev, i);
+ if (err)
+ netdev_warn(net_dev,
+ "err trying to delete classification entry %d\n",
+@@ -782,8 +760,8 @@ static void dpaa2_clear_cls(struct net_d
+ }
+ }
+
+-static int dpaa2_set_rxnfc(struct net_device *net_dev,
+- struct ethtool_rxnfc *rxnfc)
++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
+ {
+ int err = 0;
+
+@@ -792,19 +770,19 @@ static int dpaa2_set_rxnfc(struct net_de
+ /* first off clear ALL classification rules, chaging key
+ * composition will break them anyway
+ */
+- dpaa2_clear_cls(net_dev);
++ clear_cls(net_dev);
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+- err = dpaa2_set_hash(net_dev, rxnfc->data);
++ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+- err = dpaa2_add_cls(net_dev, &rxnfc->fs);
++ err = add_cls(net_dev, &rxnfc->fs);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+- err = dpaa2_del_cls(net_dev, rxnfc->fs.location);
++ err = del_cls(net_dev, rxnfc->fs.location);
+ break;
+
+ default:
+@@ -814,8 +792,8 @@ static int dpaa2_set_rxnfc(struct net_de
+ return err;
+ }
+
+-static int dpaa2_get_rxnfc(struct net_device *net_dev,
+- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT;
+@@ -831,7 +809,7 @@ static int dpaa2_get_rxnfc(struct net_de
+ break;
+
+ case ETHTOOL_GRXRINGS:
+- rxnfc->data = dpaa2_queue_count(priv);
++ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
+
+ case ETHTOOL_GRXCLSRLCNT:
+@@ -868,15 +846,13 @@ static int dpaa2_get_rxnfc(struct net_de
+ }
+
+ const struct ethtool_ops dpaa2_ethtool_ops = {
+- .get_drvinfo = dpaa2_get_drvinfo,
+- .get_msglevel = dpaa2_get_msglevel,
+- .set_msglevel = dpaa2_set_msglevel,
++ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+- .get_settings = dpaa2_get_settings,
+- .set_settings = dpaa2_set_settings,
+- .get_sset_count = dpaa2_get_sset_count,
+- .get_ethtool_stats = dpaa2_get_ethtool_stats,
+- .get_strings = dpaa2_get_strings,
+- .get_rxnfc = dpaa2_get_rxnfc,
+- .set_rxnfc = dpaa2_set_rxnfc,
++ .get_settings = dpaa2_eth_get_settings,
++ .set_settings = dpaa2_eth_set_settings,
++ .get_sset_count = dpaa2_eth_get_sset_count,
++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
++ .get_strings = dpaa2_eth_get_strings,
++ .get_rxnfc = dpaa2_eth_get_rxnfc,
++ .set_rxnfc = dpaa2_eth_set_rxnfc,
+ };
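
The probe hunks above replace the old CONFIG_FSL_DPAA2_ETH_LINK_POLL
compile-time switch with a runtime fallback: request MC-provided IRQs
first, and only start a polling kthread when that fails. Condensed into
one hypothetical helper (a sketch reusing the patch's setup_irqs() and
poll_link_state(), not code from the patch itself):

    static int wire_link_notifications(struct net_device *net_dev,
                                       struct fsl_mc_device *ls_dev,
                                       struct dpaa2_eth_priv *priv)
    {
            int err;

            /* Preferred path: link-state change IRQ from the MC */
            err = setup_irqs(ls_dev);
            if (!err)
                    return 0;

            /* No IRQ available: poll the link state from a kthread */
            netdev_warn(net_dev, "no link IRQ, falling back to polling\n");
            priv->poll_thread = kthread_run(poll_link_state, priv,
                                            "%s_poll_link", net_dev->name);
            if (IS_ERR(priv->poll_thread))
                    return PTR_ERR(priv->poll_thread);

            /* Remembered so that remove() knows what to undo */
            priv->do_link_poll = true;
            return 0;
    }

Teardown mirrors the choice: dpaa2_eth_remove() now calls kthread_stop()
when do_link_poll is set and fsl_mc_free_irqs() otherwise.
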
diff --git a/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch b/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch
new file mode 100644
index 0000000..41d1c04
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7203-fsl-dpaa2-eth-Update-description-of-DPNI-counters.patch
@@ -0,0 +1,37 @@
+From 727de4692d731655dea96702aa099f4f4d3a5203 Mon Sep 17 00:00:00 2001
+From: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+Date: Mon, 21 Mar 2016 16:10:01 +0200
+Subject: [PATCH 203/226] fsl-dpaa2: eth: Update description of DPNI counters
+
+Update description of DPNI counters presented with "ethtool -S".
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+(cherry picked from commit f68aab60355d00af13fdff2ded7bf38809beacd3)
+---
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -39,15 +39,18 @@
+ char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+ "rx frames",
+ "rx bytes",
+- "rx frames dropped",
+- "rx err frames",
++ /* rx frames filtered/policed */
++ "rx filtered frames",
++ /* rx frames dropped with errors */
++ "rx discarded frames",
+ "rx mcast frames",
+ "rx mcast bytes",
+ "rx bcast frames",
+ "rx bcast bytes",
+ "tx frames",
+ "tx bytes",
+- "tx err frames",
++ /* tx frames dropped with errors */
++ "tx discarded frames",
+ };
+
+ #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
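
"ethtool -S" prints each string in dpaa2_ethtool_stats[] next to the
value emitted at the same index by .get_ethtool_stats, so renaming the
descriptions above is purely cosmetic. For reference, the plumbing that
exposes the table looks roughly like this (simplified sketch; the real
callbacks also handle the dpaa2_ethtool_extras[] array):

    static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
    {
            if (sset == ETH_SS_STATS)
                    return ARRAY_SIZE(dpaa2_ethtool_stats);
            return -EOPNOTSUPP;
    }

    static void dpaa2_eth_get_strings(struct net_device *net_dev, u32 sset,
                                      u8 *data)
    {
            int i;

            if (sset != ETH_SS_STATS)
                    return;
            for (i = 0; i < ARRAY_SIZE(dpaa2_ethtool_stats); i++)
                    memcpy(data + i * ETH_GSTRING_LEN,
                           dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
    }
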
diff --git a/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch b/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch
new file mode 100644
index 0000000..ff74d13
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7204-fsl-dpaa2-eth-dpni-Clear-compiler-warnings.patch
@@ -0,0 +1,38 @@
+From 9a38e2ce3b46a2bdc90b4ad190a26f9418909450 Mon Sep 17 00:00:00 2001
+From: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+Date: Tue, 29 Mar 2016 13:23:50 +0300
+Subject: [PATCH 204/226] fsl-dpaa2: eth: dpni: Clear compiler warnings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Clear two warnings given by -Wcast-qual:
+warning: cast discards ‘__attribute__((const))’ qualifier from pointer
+target type
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+(cherry picked from commit 96d14f291c2750e8b09268cecb84bfe7f013294d)
+---
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+@@ -128,7 +128,7 @@ int dpni_prepare_extended_cfg(const stru
+ int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
+ const uint8_t *ext_cfg_buf)
+ {
+- uint64_t *ext_params = (uint64_t *)ext_cfg_buf;
++ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf;
+
+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg);
+
+@@ -1651,7 +1651,7 @@ void dpni_prepare_early_drop(const struc
+ void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
+ const uint8_t *early_drop_buf)
+ {
+- uint64_t *ext_params = (uint64_t *)early_drop_buf;
++ const uint64_t *ext_params = (const uint64_t *)early_drop_buf;
+
+ DPNI_EXT_EARLY_DROP(ext_params, cfg);
+ }
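
The -Wcast-qual fix above follows a general pattern: a cast must not
silently drop a const qualifier from the pointee. A standalone
illustration of the warning and its fix (user-space C; assumes the
buffer is suitably aligned, as the MC extension buffers are):

    #include <stdint.h>

    /* gcc -Wcast-qual warns: cast discards 'const' qualifier */
    static uint64_t read_word_bad(const uint8_t *buf)
    {
            uint64_t *p = (uint64_t *)buf;
            return *p;
    }

    /* Keeping the qualifier in the cast clears the warning */
    static uint64_t read_word_good(const uint8_t *buf)
    {
            const uint64_t *p = (const uint64_t *)buf;
            return *p;
    }
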
diff --git a/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch b/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch
new file mode 100644
index 0000000..0f96213
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7205-fsl-dpaa2-eth-sanitize-supported-private-flags.patch
@@ -0,0 +1,57 @@
+From 51106cb1fd14dfbf62c2760921463376f56ac732 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata at nxp.com>
+Date: Tue, 21 Jun 2016 18:40:47 +0000
+Subject: [PATCH 205/226] fsl-dpaa2: eth: sanitize supported private flags
+
+On linux-v4.6 with CONFIG_MACVLAN=y, when bringing up a ni interface, the
+network stack crashes due to a segfault. This is related to the
+macvlan_device_event notifier, which registers itself to all the network
+interface in the system.
+
+The notifier reads the netdev private flags and incorrectly qualifies
+the interface as a macvlan port, since both the IFF_MACVLAN_PORT and
+IFF_PROMISC flags have the same offset. Code spelunking reveals that
+IFF_PROMISC is only used as an interface flag, not a private interface
+flag.
+
+A similar situation happens with IFF_ALLMULTI, which overlaps with
+IFF_BRIDGE_PORT. No info on the consequences of this, since I haven't
+tested bridge scenarios. The interface can still be set in allmulti
+mode using userspace tools (e.g. ifconfig).
+
+IFF_MULTICAST overlaps with IFF_UNICAST_FLT, therefore the current code
+has no effect as it is. The closest multicast activation based on device
+capabilities has been seen in the case of the Aeroflex Gaisler Ethernet
+MAC (aeroflex/greth.c) - here, the runtime (not private) flag is set on
+device probe. On a side note, ether_setup enables IFF_MULTICAST by default.
+
+Remove IFF_PROMISC, IFF_ALLMULTI and IFF_MULTICAST from device capabilities
+init.
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -1176,18 +1176,13 @@ static int dpaa2_eth_init(struct net_dev
+ u32 options = priv->dpni_attrs.options;
+
+ /* Capabilities listing */
+- supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI;
++ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_UNICAST_FILTER)
+ supported |= IFF_UNICAST_FLT;
+ else
+ not_supported |= IFF_UNICAST_FLT;
+
+- if (options & DPNI_OPT_MULTICAST_FILTER)
+- supported |= IFF_MULTICAST;
+- else
+- not_supported |= IFF_MULTICAST;
+-
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
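
To make the overlap described in the commit message concrete: flags
like IFF_PROMISC live in net_device->flags (uapi if.h), while
IFF_MACVLAN_PORT lives in net_device->priv_flags (netdevice.h), and on
a 4.4-era kernel the colliding bit values line up like this (a sketch
of the aliasing, not code from the patch):

    /*  net_device->flags           net_device->priv_flags
     *  IFF_PROMISC   = 0x100       IFF_MACVLAN_PORT = 1 << 8   (0x100)
     *  IFF_ALLMULTI  = 0x200       IFF_BRIDGE_PORT  = 1 << 9   (0x200)
     *  IFF_MULTICAST = 0x1000      IFF_UNICAST_FLT  = 1 << 12  (0x1000)
     */

    static void illustrate_flag_overlap(struct net_device *net_dev)
    {
            /* The removed code effectively did this - the bit reads back
             * as IFF_MACVLAN_PORT, so macvlan_device_event() treated the
             * DPNI as one of its ports and dereferenced bogus state:
             */
            net_dev->priv_flags |= IFF_PROMISC;

            /* Promiscuous mode is driven through the public word instead: */
            dev_set_promiscuity(net_dev, 1);
    }
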
diff --git a/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch
new file mode 100644
index 0000000..bdb0f58
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7206-fsl-dpaa2-eth-match-id-cleanup.patch
@@ -0,0 +1,26 @@
+From 7e536d0c2f870b39480268c20af6fc3d21abe611 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 15 Jun 2016 14:03:43 -0500
+Subject: [PATCH 206/226] fsl-dpaa2: eth: match id cleanup
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -2787,12 +2787,10 @@ static int dpaa2_eth_remove(struct fsl_m
+ return 0;
+ }
+
+-static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = {
++static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpni",
+- .ver_major = DPNI_VER_MAJOR,
+- .ver_minor = DPNI_VER_MINOR
+ },
+ { .vendor = 0x0 }
+ };
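
With the version fields gone, the bus binds a driver to a DPAA2 object
on vendor and object type alone, walking the table up to the zeroed
sentinel entry. Roughly what the fsl-mc bus match boils down to (a
sketch of the matching logic, not the verbatim bus code):

    static bool mc_table_matches(const struct fsl_mc_device_id *table,
                                 const struct fsl_mc_device *mc_dev)
    {
            const struct fsl_mc_device_id *id;

            for (id = table; id->vendor != 0x0; id++) {
                    if (id->vendor == mc_dev->obj_desc.vendor &&
                        !strcmp(id->obj_type, mc_dev->obj_desc.type))
                            return true;
            }
            return false;
    }
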
diff --git a/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch b/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch
new file mode 100644
index 0000000..b7885cf
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7207-fsl-dpaa2-eth-add-device-table-to-driver.patch
@@ -0,0 +1,22 @@
+From 8557c8a3823b341607e16048d8318a1958eab3a9 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Thu, 12 May 2016 17:52:28 -0500
+Subject: [PATCH 207/226] fsl-dpaa2: eth: add device table to driver
+
+This is needed to have the driver loaded as a module.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -2794,6 +2794,7 @@ static const struct fsl_mc_device_id dpa
+ },
+ { .vendor = 0x0 }
+ };
++MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+ static struct fsl_mc_driver dpaa2_eth_driver = {
+ .driver = {
diff --git a/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch b/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch
new file mode 100644
index 0000000..42a23a5
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7208-staging-fsl-dpaa2-mac-Added-MAC-PHY-interface-driver.patch
@@ -0,0 +1,2347 @@
+From ecaf55d2907835cd0580903e134cdf08416ff694 Mon Sep 17 00:00:00 2001
+From: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Date: Tue, 15 Sep 2015 10:27:19 -0500
+Subject: [PATCH 208/226] staging: fsl-dpaa2: mac: Added MAC / PHY interface
+ driver
+
+This is a commit of the cumulative, squashed dpmac patches.
+All the commit logs are preserved below.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+--------------------------------------------------------------
+
+flib,dpmac: add dpmac files (Rebasing onto kernel 3.19, MC 0.6)
+
+patches moved from 4.0 kernel
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+[Stuart: cherry-picked patch and split it up]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-dpaa2: mac: Added MAC / PHY interface driver.
+
+This driver works as a proxy between phylib (including PHY drivers) and
+the MC firmware. It receives updates on link state changes from PHY
+lib, forwards them to MC, and receives an interrupt from MC whenever
+a request is made to change the link state.
+
+Signed-off-by: Alex Marginean <alexandru.marginean at freescale.com>
+Change-Id: I8097ea69ea8effded3bddd43b9d326bbb59ba6c8
+Reviewed-on: http://git.am.freescale.net:8181/35113
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Change IRQ flags
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ia86570858f9cf7f673089cd7c2078662d56b2f01
+Reviewed-on: http://git.am.freescale.net:8181/35581
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Check for actual link state change
+
+Do not invoke the MC firmware if the link state hasn't changed.
+
+Signed-off-by: Alex Marginean <alexandru.marginean at freescale.com>
+Change-Id: Iba59d8b52c72334efa28f6126e50ec821c802852
+Reviewed-on: http://git.am.freescale.net:8181/35582
+Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Fix "dpmac netdevs" probing
+
+Fixup code under DPAA2_MAC_NETDEVS to probe again. In particular, remove
+the temporary addition of "fixed.c" in the mac/ folder.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Iea6768f3c5cd9b2de2c8421c03ecebf155b9792b
+Reviewed-on: http://git.am.freescale.net:8181/37673
+Reviewed-by: Ruxandra Ioana Radulescu <ruxandra.radulescu at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+flib: Remove duplicate header files
+
+These files are included by the DPAA2 mac driver files.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Change-Id: Ieff56e3c34393ef65a5ac1123aaf00bacefa050c
+Reviewed-on: http://git.am.freescale.net:8181/37257
+Reviewed-by: Alexandru Marginean <Alexandru.Marginean at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Add dependency on CONFIG_FIXED_PHY
+
+The DPAA2 DPMAC driver currently relies on fixed links, so it will fail
+to probe in unusual ways if CONFIG_FIXED_PHY is not enabled.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Change-Id: Ibc53226a215ed85a2ba22c55b18595fb939e7418
+Reviewed-on: http://git.am.freescale.net:8181/37687
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Fix macro
+
+Remove macro ending backslash.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Change-Id: Ib0c4a41eee8fbe4aa7c991fc7fdb87771d3bf594
+Reviewed-on: http://git.am.freescale.net:8181/37254
+Tested-by: Review Code-CDREVIEW <CDREVIEW at freescale.com>
+Reviewed-by: Alexandru Marginean <Alexandru.Marginean at freescale.com>
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: migrated remaining flibs for MC fw 8.0.0
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+[Stuart: split mac part out of original patch, updated subject]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-dpaa2: mac: Port to MC-0.7 Flibs
+
+Change-Id: Ief731e245bdc207f1bf8e7ff4dfdabb445d6010e
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Reviewed-on: http://git.am.freescale.net:8181/39151
+Reviewed-by: Stuart Yoder <stuart.yoder at freescale.com>
+Tested-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+staging: fsl-dpaa2: mac: Do programming of MSIs in devm_request_threaded_irq()
+
+With the new dprc_set_obj_irq() we can now program MSIs in the device
+in the callback invoked from devm_request_threaded_irq().
+Since this callback is invoked with interrupts disabled, we need to
+use an atomic portal, instead of the root DPRC's built-in portal
+which is non-atomic.
+
+Signed-off-by: Itai Katz <itai.katz at freescale.com>
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+[Stuart: split original patch up by component]
+Signed-off-by: Stuart Yoder <stuart.yoder at freescale.com>
+
+fsl-dpaa2: mac: Fix driver probing
+
+The DPMAC probing function was broken in many ways. This patch adds
+the following fixes:
+ - Look up PHY nodes based on the phy-handle property of the respective
+ DPMAC node;
+ - Defer DPMAC device probing until the MDIO MUX driver probes first (we
+ depend on that for configuring the PHYs on PCIe riser cards on
+ LS2085A QDS boards);
+ - Add Kconfig dependencies on XGMAC_MDIO and MDIO_BUS_MUX_MMIOREG.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpaa2: mac: Fix interrupt handling
+
+The DPMAC has two interrupt events muxed on a single interrupt line.
+Both the PHY and the DPNI can initiate a link event.
+
+When the link event is initiated by the PHY (possibly as the effect of an
+earlier link change request initiated by a DPNI), we must make sure
+dpmac_set_link_state() is explicitly called in order for the event to be
+propagated (back) to the DPNI.
+
+Finally, the DPMAC interrupt mask has to be explicitly specified before calling
+dpmac_set_irq_enabled().
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpaa2: mac: Fix print in case device is not initialized
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpaa2: mac: Fix error paths at probe
+
+Merge error condition checks. Add error codes to the early exit paths.
+Fix swapped goto labels.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpaa2: mac: Remove unused function prototype
+
+fixed_phy_register_2() was a leftover from when we had to backport the fixed
+PHY implementation to kernel v3.10.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+
+fsl-dpaa2/mac: Update dpmac binary interface to v3.2
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: mac: Update Flib to MC 0.8.1
+
+In practice, this adds a counter for "good" egress frames.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Add counter for "good" egress frames
+
+Now available with the 0.8.1 Flibs.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Update dpmac_set_link_state() error checking
+
+As of 0.8.1 Flibs, dpmac_set_link_state() no longer returns what we'd
+consider spurious errors. This allows for cleaner error checking on
+the DPMAC side.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Remove __cold attribute
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Check DPMAC FLIB version
+
+Make sure we support the DPMAC version, otherwise abort probing
+early on and provide an error message.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at freescale.com>
+
+fsl-dpaa2: mac: Replace uintX_t with uX
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Fix crash on error path
+
+If the fixed-phy cannot be correctly registered, unregister_netdev()
+receives a non-NULL, yet invalid phydev. Force the phydev reference to
+NULL to avoid a crash on the probe routine's error path.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Remove TODOs comments from the code
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Fix ppx_eth_iface_mode order
+
+ppx_eth_iface_mode must be kept in sync with enum dpmac_eth_if, but some
+array values weren't in the right order.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Remove forward declarations of functions
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Remove ppx_{err,warn,info} macros
+
+Replace them with their straightforward equivalents, as their contexts
+are unambiguous.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Use non-atomic MC portal
+
+The DPMAC driver does not make MC calls from atomic contexts, so it is
+safe to request non-atomic MC portals.
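+
+Concretely, the portal is allocated without the atomic-context flag
+(a sketch, as done in dpaa2_mac_probe(); FSL_MC_IO_ATOMIC_CONTEXT_PORTAL
+is the fsl-mc flag name assumed here):
+
+	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);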
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Replace "ppx" prefix with "dpaa2_mac"
+
+Use a naming convention similar to the Ethernet driver's,
+replacing "ppx" with "dpaa2_mac" as the prefix for functions and
+structures.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: mac: Remove unnecessary blank line
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+
+fsl-dpaa2: mac: Do not handle link change confirmation interrupt
+
+That interrupt is mostly useful for debugging.
+
+Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc at nxp.com>
+
+fsl-dpaa2: mac: Resolve compile issues on uprev to 4.5
+
+- interrupt info in the mc struct changed upstream
+- fixed_phy_register() gained a new argument
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ MAINTAINERS | 6 +
+ drivers/staging/fsl-dpaa2/Kconfig | 1 +
+ drivers/staging/fsl-dpaa2/Makefile | 1 +
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 24 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++++++++
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 ++++++++++++++++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 767 +++++++++++++++++++++++++++++
+ 9 files changed, 2019 insertions(+)
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4554,6 +4554,12 @@ S: Maintained
+ F: drivers/staging/fsl-mc/bus/mc-ioctl.h
+ F: drivers/staging/fsl-mc/bus/mc-restool.c
+
++FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER
++M: Alex Marginean <Alexandru.Marginean at freescale.com>
++L: linux-kernel at vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/mac/
++
+ FREEVXFS FILESYSTEM
+ M: Christoph Hellwig <hch at infradead.org>
+ W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
+--- a/drivers/staging/fsl-dpaa2/Kconfig
++++ b/drivers/staging/fsl-dpaa2/Kconfig
+@@ -9,3 +9,4 @@ config FSL_DPAA2
+ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs.
+ # TODO move DPIO driver in-here?
+ source "drivers/staging/fsl-dpaa2/ethernet/Kconfig"
++source "drivers/staging/fsl-dpaa2/mac/Kconfig"
+--- a/drivers/staging/fsl-dpaa2/Makefile
++++ b/drivers/staging/fsl-dpaa2/Makefile
+@@ -3,3 +3,4 @@
+ #
+
+ obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
++obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
+@@ -0,0 +1,24 @@
++config FSL_DPAA2_MAC
++ tristate "DPAA2 MAC / PHY interface"
++ depends on FSL_MC_BUS && FSL_DPAA2
++ select MDIO_BUS_MUX_MMIOREG
++ select FSL_XGMAC_MDIO
++ select FIXED_PHY
++ ---help---
++ Prototype driver for DPAA2 MAC / PHY interface object.
++ This driver works as a proxy between phylib (including PHY drivers)
++ and the MC firmware. It receives link state updates from phylib and
++ forwards them to the MC, and it receives an interrupt from the MC
++ whenever a request is made to change the link state.
++
++config FSL_DPAA2_MAC_NETDEVS
++ bool "Expose net interfaces for PHYs"
++ default n
++ depends on FSL_DPAA2_MAC
++ ---help---
++ Exposes macX net interfaces which allow direct control over MACs and
++ PHYs.
++
++ Leave disabled if unsure.
++
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
++
++dpaa2-mac-objs := mac.o dpmac.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+@@ -0,0 +1,195 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPMAC_CMD_H
++#define _FSL_DPMAC_CMD_H
++
++/* DPMAC Version */
++#define DPMAC_VER_MAJOR 3
++#define DPMAC_VER_MINOR 2
++
++/* Command IDs */
++#define DPMAC_CMDID_CLOSE 0x800
++#define DPMAC_CMDID_OPEN 0x80c
++#define DPMAC_CMDID_CREATE 0x90c
++#define DPMAC_CMDID_DESTROY 0x900
++
++#define DPMAC_CMDID_GET_ATTR 0x004
++#define DPMAC_CMDID_RESET 0x005
++
++#define DPMAC_CMDID_SET_IRQ 0x010
++#define DPMAC_CMDID_GET_IRQ 0x011
++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012
++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013
++#define DPMAC_CMDID_SET_IRQ_MASK 0x014
++#define DPMAC_CMDID_GET_IRQ_MASK 0x015
++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016
++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPMAC_CMDID_MDIO_READ 0x0c0
++#define DPMAC_CMDID_MDIO_WRITE 0x0c1
++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2
++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3
++#define DPMAC_CMDID_GET_COUNTER 0x0c4
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\
++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_MDIO_READ(cmd, data) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_COUNTER(cmd, type) \
++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++#endif /* _FSL_DPMAC_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
+@@ -0,0 +1,422 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpmac.h"
++#include "dpmac-cmd.h"
++
++int dpmac_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmac_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPMAC_CMD_OPEN(cmd, dpmac_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dpmac_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmac_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPMAC_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmac_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmac_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dpmac_mdio_read(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_MDIO_READ(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_MDIO_READ(cmd, cfg->data);
++
++ return 0;
++}
++
++int dpmac_mdio_write(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_MDIO_WRITE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPMAC_RSP_GET_LINK_CFG(cmd, cfg);
++
++ return 0;
++}
++
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_state *link_state)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_LINK_STATE(cmd, link_state);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpmac_counter type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_COUNTER(cmd, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPMAC_RSP_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
+@@ -0,0 +1,593 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMAC_H
++#define __FSL_DPMAC_H
++
++/* Data Path MAC API
++ * Contains initialization APIs and runtime control APIs for DPMAC
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpmac_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpmac_id: DPMAC unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpmac_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmac_id,
++ uint16_t *token);
++
++/**
++ * dpmac_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * enum dpmac_link_type - DPMAC link type
++ * @DPMAC_LINK_TYPE_NONE: No link
++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
++ */
++enum dpmac_link_type {
++ DPMAC_LINK_TYPE_NONE,
++ DPMAC_LINK_TYPE_FIXED,
++ DPMAC_LINK_TYPE_PHY,
++ DPMAC_LINK_TYPE_BACKPLANE
++};
++
++/**
++ * enum dpmac_eth_if - DPMAC Ethernet interface
++ * @DPMAC_ETH_IF_MII: MII interface
++ * @DPMAC_ETH_IF_RMII: RMII interface
++ * @DPMAC_ETH_IF_SMII: SMII interface
++ * @DPMAC_ETH_IF_GMII: GMII interface
++ * @DPMAC_ETH_IF_RGMII: RGMII interface
++ * @DPMAC_ETH_IF_SGMII: SGMII interface
++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
++ * @DPMAC_ETH_IF_XAUI: XAUI interface
++ * @DPMAC_ETH_IF_XFI: XFI interface
++ */
++enum dpmac_eth_if {
++ DPMAC_ETH_IF_MII,
++ DPMAC_ETH_IF_RMII,
++ DPMAC_ETH_IF_SMII,
++ DPMAC_ETH_IF_GMII,
++ DPMAC_ETH_IF_RGMII,
++ DPMAC_ETH_IF_SGMII,
++ DPMAC_ETH_IF_QSGMII,
++ DPMAC_ETH_IF_XAUI,
++ DPMAC_ETH_IF_XFI
++};
++
++/**
++ * struct dpmac_cfg - Structure representing DPMAC configuration
++ * @mac_id: Represents the hardware MAC ID; in case of multiple WRIOPs,
++ * the MAC IDs are contiguous.
++ * For example: 2 WRIOPs, 16 MACs in each:
++ * MAC IDs for the 1st WRIOP: 1-16,
++ * MAC IDs for the 2nd WRIOP: 17-32.
++ */
++struct dpmac_cfg {
++ int mac_id;
++};
++
++/**
++ * dpmac_create() - Create the DPMAC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPMAC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call the dpmac_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmac_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPMAC IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPMAC_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
++
++/**
++ * struct dpmac_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpmac_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmac_irq_cfg *irq_cfg);
++
++/**
++ * dpmac_get_irq() - Get IRQ information from the DPMAC.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmac_irq_cfg *irq_cfg);
++
++/**
++ * dpmac_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, none of its causes
++ * can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpmac_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpmac_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpmac_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpmac_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpmac_attr - Structure representing DPMAC attributes
++ * @id: DPMAC object ID
++ * @phy_id: PHY ID
++ * @link_type: link type
++ * @eth_if: Ethernet interface
++ * @max_rate: Maximum supported rate - in Mbps
++ * @version: DPMAC version
++ */
++struct dpmac_attr {
++ int id;
++ int phy_id;
++ enum dpmac_link_type link_type;
++ enum dpmac_eth_if eth_if;
++ uint32_t max_rate;
++ /**
++ * struct version - Structure representing DPMAC version
++ * @major: DPMAC major version
++ * @minor: DPMAC minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpmac_get_attributes() - Retrieve DPMAC attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_attr *attr);
++
++/**
++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters
++ * @phy_addr: MDIO device address
++ * @reg: Address of the register within the Clause 45 PHY device from which data
++ * is to be read
++ * @data: Data read/write from/to MDIO
++ */
++struct dpmac_mdio_cfg {
++ uint8_t phy_addr;
++ uint8_t reg;
++ uint16_t data;
++};
++
++/**
++ * dpmac_mdio_read() - Perform MDIO read transaction
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Structure with MDIO transaction parameters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_mdio_read(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg);
++
++/**
++ * dpmac_mdio_write() - Perform MDIO write transaction
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Structure with MDIO transaction parameters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_mdio_write(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg);
++
++/**
++ * DPMAC link configuration/state options
++ */
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
++ * @rate: Link's rate - in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ */
++struct dpmac_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpmac_get_link_cfg() - Get Ethernet link configuration
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_cfg *cfg);
++
++/**
++ * struct dpmac_link_state - DPMAC link configuration request
++ * @rate: Rate in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ * @up: Link state
++ */
++struct dpmac_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpmac_set_link_state() - Set the Ethernet link status
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @link_state: Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_state *link_state);
++
++/**
++ * enum dpmac_counter - DPMAC counter types
++ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
++ * (up to the maximum frame length specified),
++ * good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes with a
++ * wrong CRC.
++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
++ * specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts frames dropped due to internal errors.
++ * Occurs when a receive FIFO overflows.
++ * Also includes frames truncated as a result of
++ * the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
++ * (optionally used for a wrong SFD).
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than
++ * 64 bytes long, with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
++ * specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
++ * (regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
++ * frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
++ * (except for undersized/fragment frame).
++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
++ * frames and valid pause frames transmitted.
++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
++ * pause frames.
++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
++ * pause frames.
++ */
++enum dpmac_counter {
++ DPMAC_CNT_ING_FRAME_64,
++ DPMAC_CNT_ING_FRAME_127,
++ DPMAC_CNT_ING_FRAME_255,
++ DPMAC_CNT_ING_FRAME_511,
++ DPMAC_CNT_ING_FRAME_1023,
++ DPMAC_CNT_ING_FRAME_1518,
++ DPMAC_CNT_ING_FRAME_1519_MAX,
++ DPMAC_CNT_ING_FRAG,
++ DPMAC_CNT_ING_JABBER,
++ DPMAC_CNT_ING_FRAME_DISCARD,
++ DPMAC_CNT_ING_ALIGN_ERR,
++ DPMAC_CNT_EGR_UNDERSIZED,
++ DPMAC_CNT_ING_OVERSIZED,
++ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
++ DPMAC_CNT_ING_BYTE,
++ DPMAC_CNT_ING_MCAST_FRAME,
++ DPMAC_CNT_ING_BCAST_FRAME,
++ DPMAC_CNT_ING_ALL_FRAME,
++ DPMAC_CNT_ING_UCAST_FRAME,
++ DPMAC_CNT_ING_ERR_FRAME,
++ DPMAC_CNT_EGR_BYTE,
++ DPMAC_CNT_EGR_MCAST_FRAME,
++ DPMAC_CNT_EGR_BCAST_FRAME,
++ DPMAC_CNT_EGR_UCAST_FRAME,
++ DPMAC_CNT_EGR_ERR_FRAME,
++ DPMAC_CNT_ING_GOOD_FRAME,
++ DPMAC_CNT_ENG_GOOD_FRAME
++};
++
++/**
++ * dpmac_get_counter() - Read a specific DPMAC counter
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @type: The requested counter
++ * @counter: Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpmac_counter type,
++ uint64_t *counter);
++
++#endif /* __FSL_DPMAC_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -0,0 +1,767 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include <linux/of.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/phy.h>
++#include <linux/phy_fixed.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "../../fsl-mc/include/mc-sys.h"
++
++#include "dpmac.h"
++#include "dpmac-cmd.h"
++
++#define DPAA2_SUPPORTED_DPMAC_VERSION 3
++
++struct dpaa2_mac_priv {
++ struct net_device *netdev;
++ struct fsl_mc_device *mc_dev;
++ struct dpmac_attr attr;
++ struct dpmac_link_state old_state;
++};
++
++/* TODO: fix the 10G modes, mapping can't be right:
++ * XGMII is parallel
++ * XAUI is serial, using 8b/10b encoding
++ * XFI is also serial but using 64b/66b encoding
++ * they can't all map to XGMII...
++ *
++ * This must be kept in sync with enum dpmac_eth_if.
++ */
++static phy_interface_t dpaa2_mac_iface_mode[] = {
++ /* DPMAC_ETH_IF_MII */
++ PHY_INTERFACE_MODE_MII,
++ /* DPMAC_ETH_IF_RMII */
++ PHY_INTERFACE_MODE_RMII,
++ /* DPMAC_ETH_IF_SMII */
++ PHY_INTERFACE_MODE_SMII,
++ /* DPMAC_ETH_IF_GMII */
++ PHY_INTERFACE_MODE_GMII,
++ /* DPMAC_ETH_IF_RGMII */
++ PHY_INTERFACE_MODE_RGMII,
++ /* DPMAC_ETH_IF_SGMII */
++ PHY_INTERFACE_MODE_SGMII,
++ /* DPMAC_ETH_IF_QSGMII */
++ PHY_INTERFACE_MODE_QSGMII,
++ /* DPMAC_ETH_IF_XAUI */
++ PHY_INTERFACE_MODE_XGMII,
++ /* DPMAC_ETH_IF_XFI */
++ PHY_INTERFACE_MODE_XGMII,
++};
++
++static void dpaa2_mac_link_changed(struct net_device *netdev)
++{
++ struct phy_device *phydev;
++ struct dpmac_link_state state = { 0 };
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ int err;
++
++ /* the PHY just notified us of a link state change */
++ phydev = netdev->phydev;
++
++ state.up = !!phydev->link;
++ if (phydev->link) {
++ state.rate = phydev->speed;
++
++ if (!phydev->duplex)
++ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
++ if (phydev->autoneg)
++ state.options |= DPMAC_LINK_OPT_AUTONEG;
++
++ netif_carrier_on(netdev);
++ } else {
++ netif_carrier_off(netdev);
++ }
++
++ if (priv->old_state.up != state.up ||
++ priv->old_state.rate != state.rate ||
++ priv->old_state.options != state.options) {
++ priv->old_state = state;
++ phy_print_status(phydev);
++ }
++
++ /* We must call into the MC firmware at all times, because we don't know
++ * when or whether a potential DPNI may have read the link state.
++ */
++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
++ priv->mc_dev->mc_handle, &state);
++ if (unlikely(err))
++ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
++}
++
++/* IRQ bits that we handle */
++static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ;
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int dpaa2_mac_open(struct net_device *netdev)
++{
++ /* start PHY state machine */
++ phy_start(netdev->phydev);
++
++ return 0;
++}
++
++static int dpaa2_mac_stop(struct net_device *netdev)
++{
++ if (!netdev->phydev)
++ goto done;
++
++ /* stop PHY state machine */
++ phy_stop(netdev->phydev);
++
++ /* signal link down to firmware */
++ netdev->phydev->link = 0;
++ dpaa2_mac_link_changed(netdev);
++
++done:
++ return 0;
++}
++
++static int dpaa2_mac_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ return phy_ethtool_gset(netdev->phydev, cmd);
++}
++
++static int dpaa2_mac_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ return phy_ethtool_sset(netdev->phydev, cmd);
++}
++
++static struct rtnl_link_stats64
++*dpaa2_mac_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_MCAST_FRAME,
++ &storage->tx_packets);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
++ if (err)
++ goto error;
++ storage->tx_packets += tmp;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
++ if (err)
++ goto error;
++ storage->tx_packets += tmp;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
++ if (err)
++ goto error;
++
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
++ if (err)
++ goto error;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_OVERSIZED, &tmp);
++ if (err)
++ goto error;
++ storage->rx_errors += tmp;
++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
++ DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
++ if (err)
++ goto error;
++
++ return storage;
++
++error:
++ netdev_err(netdev, "dpmac_get_counter err %d\n", err);
++ return storage;
++}
++
++static struct {
++ enum dpmac_counter id;
++ char name[ETH_GSTRING_LEN];
++} dpaa2_mac_counters[] = {
++ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
++ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
++ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
++ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
++ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
++ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
++ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
++ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
++ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
++ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
++ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
++ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
++ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
++ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
++ {DPMAC_CNT_ING_FRAG, "rx frags"},
++ {DPMAC_CNT_ING_JABBER, "rx jabber"},
++ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
++ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
++ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
++ {DPMAC_CNT_ING_BYTE, "rx bytes"},
++ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
++ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
++ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
++ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
++ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
++ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
++ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
++ {DPMAC_CNT_EGR_BYTE, "tx bytes"},
++};
++
++static void dpaa2_mac_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ dpaa2_mac_counters[i].name,
++ ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
++ int i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
++ err = dpmac_get_counter(priv->mc_dev->mc_io,
++ 0,
++ priv->mc_dev->mc_handle,
++ dpaa2_mac_counters[i].id, &data[i]);
++ if (err)
++ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
++ dpaa2_mac_counters[i].name, err);
++ }
++}
++
++static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(dpaa2_mac_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static const struct net_device_ops dpaa2_mac_ndo_ops = {
++ .ndo_start_xmit = &dpaa2_mac_drop_frame,
++ .ndo_open = &dpaa2_mac_open,
++ .ndo_stop = &dpaa2_mac_stop,
++ .ndo_get_stats64 = &dpaa2_mac_get_stats,
++};
++
++static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
++ .get_settings = &dpaa2_mac_get_settings,
++ .set_settings = &dpaa2_mac_set_settings,
++ .get_strings = &dpaa2_mac_get_strings,
++ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
++ .get_sset_count = &dpaa2_mac_get_sset_count,
++};
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++
++static int configure_link(struct dpaa2_mac_priv *priv,
++ struct dpmac_link_cfg *cfg)
++{
++ struct phy_device *phydev = priv->netdev->phydev;
++
++ if (!phydev) {
++ dev_warn(priv->netdev->dev.parent,
++ "asked to change PHY settings but PHY ref is NULL, ignoring\n");
++ return 0;
++ }
++
++ phydev->speed = cfg->rate;
++ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
++
++ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
++ phydev->autoneg = 1;
++ phydev->advertising |= ADVERTISED_Autoneg;
++ } else {
++ phydev->autoneg = 0;
++ phydev->advertising &= ~ADVERTISED_Autoneg;
++ }
++
++ phy_start_aneg(phydev);
++
++ return 0;
++}
++
++static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
++ struct dpmac_link_cfg link_cfg;
++ u8 irq_index = DPMAC_IRQ_INDEX;
++ u32 status, clear = 0;
++ int err;
++
++ if (mc_dev->irqs[0]->msi_desc->irq != irq_num) {
++ dev_err(dev, "received unexpected interrupt %d!\n", irq_num);
++ goto err;
++ }
++
++ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ irq_index, &status);
++ if (err) {
++ dev_err(dev, "dpmac_get_irq_status err %d\n", err);
++ clear = ~0x0u;
++ goto out;
++ }
++
++ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
++ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
++ dev_dbg(dev, "DPMAC IRQ %d - LINK_CFG_REQ\n", irq_num);
++ clear |= DPMAC_IRQ_EVENT_LINK_CFG_REQ;
++
++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ &link_cfg);
++ if (err) {
++ dev_err(dev, "dpmac_get_link_cfg err %d\n", err);
++ goto out;
++ }
++
++ err = configure_link(priv, &link_cfg);
++ if (err) {
++ dev_err(dev, "cannot configure link\n");
++ goto out;
++ }
++ }
++
++out:
++ err = dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ irq_index, clear);
++ if (err < 0)
++ dev_err(&mc_dev->dev, "dpmac_clear_irq_status() err %d\n", err);
++
++ return IRQ_HANDLED;
++
++err:
++ dev_warn(dev, "DPMAC IRQ %d was not handled!\n", irq_num);
++ return IRQ_NONE;
++}
++
++static int setup_irqs(struct fsl_mc_device *mc_dev)
++{
++ int err;
++
++ err = fsl_mc_allocate_irqs(mc_dev);
++ if (err) {
++ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
++ return err;
++ }
++
++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, dpmac_irq_mask);
++ if (err < 0) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
++ goto free_irq;
++ }
++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, 0);
++ if (err) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ err = devm_request_threaded_irq(&mc_dev->dev,
++ mc_dev->irqs[0]->msi_desc->irq,
++ NULL, &dpaa2_mac_irq_handler,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&mc_dev->dev), &mc_dev->dev);
++ if (err) {
++ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
++ err);
++ goto free_irq;
++ }
++
++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, dpmac_irq_mask);
++ if (err < 0) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
++ goto free_irq;
++ }
++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, 1);
++ if (err) {
++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
++ goto unregister_irq;
++ }
++
++ return 0;
++
++unregister_irq:
++ devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev);
++free_irq:
++ fsl_mc_free_irqs(mc_dev);
++
++ return err;
++}
++
++static void teardown_irqs(struct fsl_mc_device *mc_dev)
++{
++ int err;
++
++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, dpmac_irq_mask);
++ if (err < 0)
++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
++
++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, 0);
++ if (err < 0)
++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
++
++ devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev);
++ fsl_mc_free_irqs(mc_dev);
++}
++
++static struct device_node *lookup_node(struct device *dev, int dpmac_id)
++{
++ struct device_node *dpmacs, *dpmac = NULL;
++ struct device_node *mc_node = dev->of_node;
++ const void *id;
++ int lenp;
++ int dpmac_id_be32 = cpu_to_be32(dpmac_id);
++
++ dpmacs = of_find_node_by_name(mc_node, "dpmacs");
++ if (!dpmacs) {
++ dev_err(dev, "No dpmacs subnode in device-tree\n");
++ return NULL;
++ }
++
++ while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
++ id = of_get_property(dpmac, "reg", &lenp);
++ if (!id || lenp != sizeof(int)) {
++ dev_warn(dev, "Unsuitable reg property in dpmac node\n");
++ continue;
++ }
++ if (*(int *)id == dpmac_id_be32)
++ return dpmac;
++ }
++
++ return NULL;
++}
++
++static int check_dpmac_version(struct dpaa2_mac_priv *priv)
++{
++ struct device *dev = &priv->mc_dev->dev;
++ int mc_version = priv->attr.version.major;
++
++ /* Check that the FLIB-defined version matches the one reported by MC */
++ if (mc_version != DPMAC_VER_MAJOR) {
++ dev_err(dev, "DPMAC FLIB version mismatch: MC says %d, we have %d\n",
++ mc_version, DPMAC_VER_MAJOR);
++ return -EINVAL;
++ }
++
++ /* ... and that we actually support it */
++ if (mc_version < DPAA2_SUPPORTED_DPMAC_VERSION) {
++ dev_err(dev, "Unsupported DPMAC FLIB version (%d)\n",
++ mc_version);
++ return -EINVAL;
++ }
++
++ dev_dbg(dev, "Using DPMAC FLIB version %d\n", mc_version);
++
++ return 0;
++}
++
++static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
++{
++ struct device *dev;
++ struct dpaa2_mac_priv *priv = NULL;
++ struct device_node *phy_node, *dpmac_node;
++ struct net_device *netdev;
++ phy_interface_t if_mode;
++ int err = 0;
++
++ /* just being completely paranoid */
++ if (!mc_dev)
++ return -EFAULT;
++ dev = &mc_dev->dev;
++
++ /* prepare a net_dev structure to make the phy lib API happy */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ err = -ENOMEM;
++ goto err_exit;
++ }
++ priv = netdev_priv(netdev);
++ priv->mc_dev = mc_dev;
++ priv->netdev = netdev;
++
++ SET_NETDEV_DEV(netdev, dev);
++ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ if (err || !mc_dev->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
++ err = -ENODEV;
++ goto err_free_netdev;
++ }
++
++ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
++ &mc_dev->mc_handle);
++ if (err || !mc_dev->mc_handle) {
++ dev_err(dev, "dpmac_open error: %d\n", err);
++ err = -ENODEV;
++ goto err_free_mcp;
++ }
++
++ err = dpmac_get_attributes(mc_dev->mc_io, 0,
++ mc_dev->mc_handle, &priv->attr);
++ if (err) {
++ dev_err(dev, "dpmac_get_attributes err %d\n", err);
++ err = -EINVAL;
++ goto err_close;
++ }
++
++ err = check_dpmac_version(priv);
++ if (err)
++ goto err_close;
++
++ /* Look up the DPMAC node in the device-tree. */
++ dpmac_node = lookup_node(dev, priv->attr.id);
++ if (!dpmac_node) {
++ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
++ err = -ENODEV;
++ goto err_close;
++ }
++
++ err = setup_irqs(mc_dev);
++ if (err) {
++ err = -EFAULT;
++ goto err_close;
++ }
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ /* OPTIONAL, register netdev just to make it visible to the user */
++ netdev->netdev_ops = &dpaa2_mac_ndo_ops;
++ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
++
++ /* phy starts up enabled so netdev should be up too */
++ netdev->flags |= IFF_UP;
++
++ err = register_netdev(priv->netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ err = -ENODEV;
++ goto err_free_irq;
++ }
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++
++ /* probe the PHY as a fixed-link if the link type declared in DPC
++ * explicitly mandates this
++ */
++ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED)
++ goto probe_fixed_link;
++
++ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
++ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
++ phy_modes(if_mode), priv->attr.eth_if);
++ } else {
++ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
++ priv->attr.eth_if);
++ goto probe_fixed_link;
++ }
++
++ /* try to connect to the PHY */
++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
++ if (!phy_node) {
++ dev_err(dev, "dpmac node has no phy-handle property\n");
++ err = -ENODEV;
++ goto err_no_phy;
++ }
++ netdev->phydev = of_phy_connect(netdev, phy_node,
++ &dpaa2_mac_link_changed, 0, if_mode);
++ if (!netdev->phydev) {
++ /* No need for dev_err(); the kernel's loud enough as it is. */
++ dev_dbg(dev, "Can't of_phy_connect() now.\n");
++ /* We might be waiting for the MDIO MUX to probe, so defer
++ * our own probing.
++ */
++ err = -EPROBE_DEFER;
++ goto err_defer;
++ }
++ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
++
++probe_fixed_link:
++ if (!netdev->phydev) {
++ struct fixed_phy_status status = {
++ .link = 1,
++ /* fixed-phys don't support 10Gbps speed for now */
++ .speed = 1000,
++ .duplex = 1,
++ };
++
++ /* try to register a fixed link phy */
++ netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL);
++ if (!netdev->phydev || IS_ERR(netdev->phydev)) {
++ dev_err(dev, "error trying to register fixed PHY\n");
++ /* So we don't crash unregister_netdev() later on */
++ netdev->phydev = NULL;
++ err = -EFAULT;
++ goto err_no_phy;
++ }
++ dev_info(dev, "Registered fixed PHY.\n");
++ }
++
++ /* start PHY state machine */
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ dpaa2_mac_open(netdev);
++#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++ phy_start(netdev->phydev);
++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
++ return 0;
++
++err_defer:
++err_no_phy:
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
++ unregister_netdev(netdev);
++err_free_irq:
++#endif
++ teardown_irqs(mc_dev);
++err_close:
++ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++err_free_mcp:
++ fsl_mc_portal_free(mc_dev->mc_io);
++err_free_netdev:
++ free_netdev(netdev);
++err_exit:
++ return err;
++}
++
++static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
++{
++ struct device *dev = &mc_dev->dev;
++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
++
++ unregister_netdev(priv->netdev);
++ teardown_irqs(priv->mc_dev);
++ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
++ fsl_mc_portal_free(priv->mc_dev->mc_io);
++ free_netdev(priv->netdev);
++
++ dev_set_drvdata(dev, NULL);
++ kfree(priv);
++
++ return 0;
++}
++
++static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpmac",
++ .ver_major = DPMAC_VER_MAJOR,
++ .ver_minor = DPMAC_VER_MINOR,
++ },
++ {}
++};
++
++static struct fsl_mc_driver dpaa2_mac_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_mac_probe,
++ .remove = dpaa2_mac_remove,
++ .match_id_table = dpaa2_mac_match_id_table,
++};
++
++module_fsl_mc_driver(dpaa2_mac_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
diff --git a/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch b/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch
new file mode 100644
index 0000000..9ed721f
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7209-staging-fsl-dpaa2-mac-Interrupt-code-cleanup.patch
@@ -0,0 +1,182 @@
+From bb42890533f9592e8d30654b4e0b19c3cf7caaec Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+Date: Fri, 1 Apr 2016 18:38:18 +0300
+Subject: [PATCH 209/226] staging: fsl-dpaa2/mac: Interrupt code cleanup
+
+Cleanup and a couple of minor fixes for the interrupt
+handling code:
+* Removed a few unnecessary checks, unified the format of others
+* Don't print error/debug messages in the interrupt handler
+* No need to explicitly disable DPMAC interrupts before
+configuring them
+* Use unlikely() in interrupt handler error checks
+* If the status register is zero or we're unable to read its value,
+return IRQ_NONE instead of IRQ_HANDLED
+* Always clear the entire status register, not just the bit(s)
+that were handled (see the sketch below)
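+
+A minimal sketch of the resulting handler shape (using the same DPMAC
+helpers as this driver; event handling elided):
+
+	static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
+	{
+		struct device *dev = arg;
+		struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+		u32 status;
+
+		/* a zero status means the interrupt was not for us */
+		if (unlikely(dpmac_get_irq_status(mc_dev->mc_io, 0,
+						  mc_dev->mc_handle,
+						  DPMAC_IRQ_INDEX,
+						  &status) || !status))
+			return IRQ_NONE;
+
+		/* ... act on the events reported in 'status' ... */
+
+		/* acknowledge the full status word, not just handled bits */
+		dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				       DPMAC_IRQ_INDEX, status);
+		return IRQ_HANDLED;
+	}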
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+(cherry picked from commit 4b46eec16c56e4f453ca1558af9aceaf6ffe831a)
+(Stuart: resolved merge conflict)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 77 ++++++++---------------------------
+ 1 file changed, 16 insertions(+), 61 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -132,7 +132,7 @@ static void dpaa2_mac_link_changed(struc
+ }
+
+ /* IRQ bits that we handle */
+-static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ;
++static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ;
+
+ #ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
+@@ -345,16 +345,13 @@ static const struct ethtool_ops dpaa2_ma
+ };
+ #endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
+-static int configure_link(struct dpaa2_mac_priv *priv,
+- struct dpmac_link_cfg *cfg)
++static void configure_link(struct dpaa2_mac_priv *priv,
++ struct dpmac_link_cfg *cfg)
+ {
+ struct phy_device *phydev = priv->netdev->phydev;
+
+- if (!phydev) {
+- dev_warn(priv->netdev->dev.parent,
+- "asked to change PHY settings but PHY ref is NULL, ignoring\n");
+- return 0;
+- }
++ if (unlikely(!phydev))
++ return;
+
+ phydev->speed = cfg->rate;
+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
+@@ -368,8 +365,6 @@ static int configure_link(struct dpaa2_m
+ }
+
+ phy_start_aneg(phydev);
+-
+- return 0;
+ }
+
+ static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
+@@ -378,53 +373,29 @@ static irqreturn_t dpaa2_mac_irq_handler
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
+ struct dpmac_link_cfg link_cfg;
+- u8 irq_index = DPMAC_IRQ_INDEX;
+- u32 status, clear = 0;
++ u32 status;
+ int err;
+
+- if (mc_dev->irqs[0]->msi_desc->irq != irq_num) {
+- dev_err(dev, "received unexpected interrupt %d!\n", irq_num);
+- goto err;
+- }
+-
+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+- irq_index, &status);
+- if (err) {
+- dev_err(dev, "dpmac_get_irq_status err %d\n", err);
+- clear = ~0x0u;
+- goto out;
+- }
++ DPMAC_IRQ_INDEX, &status);
++ if (unlikely(err || !status))
++ return IRQ_NONE;
+
+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
+- dev_dbg(dev, "DPMAC IRQ %d - LINK_CFG_REQ\n", irq_num);
+- clear |= DPMAC_IRQ_EVENT_LINK_CFG_REQ;
+-
+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &link_cfg);
+- if (err) {
+- dev_err(dev, "dpmac_get_link_cfg err %d\n", err);
++ if (unlikely(err))
+ goto out;
+- }
+
+- err = configure_link(priv, &link_cfg);
+- if (err) {
+- dev_err(dev, "cannot configure link\n");
+- goto out;
+- }
++ configure_link(priv, &link_cfg);
+ }
+
+ out:
+- err = dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+- irq_index, clear);
+- if (err < 0)
+- dev_err(&mc_dev->dev, "dpmac_clear_irq_status() err %d\n", err);
++ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ DPMAC_IRQ_INDEX, status);
+
+ return IRQ_HANDLED;
+-
+-err:
+- dev_warn(dev, "DPMAC IRQ %d was not handled!\n", irq_num);
+- return IRQ_NONE;
+ }
+
+ static int setup_irqs(struct fsl_mc_device *mc_dev)
+@@ -437,19 +408,6 @@ static int setup_irqs(struct fsl_mc_devi
+ return err;
+ }
+
+- err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+- DPMAC_IRQ_INDEX, dpmac_irq_mask);
+- if (err < 0) {
+- dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
+- goto free_irq;
+- }
+- err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+- DPMAC_IRQ_INDEX, 0);
+- if (err) {
+- dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+- goto free_irq;
+- }
+-
+ err = devm_request_threaded_irq(&mc_dev->dev,
+ mc_dev->irqs[0]->msi_desc->irq,
+ NULL, &dpaa2_mac_irq_handler,
+@@ -463,7 +421,7 @@ static int setup_irqs(struct fsl_mc_devi
+
+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, dpmac_irq_mask);
+- if (err < 0) {
++ if (err) {
+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
+ goto free_irq;
+ }
+@@ -490,12 +448,12 @@ static void teardown_irqs(struct fsl_mc_
+
+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, dpmac_irq_mask);
+- if (err < 0)
++ if (err)
+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
+
+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, 0);
+- if (err < 0)
++ if (err)
+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+
+ devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev);
+@@ -562,9 +520,6 @@ static int dpaa2_mac_probe(struct fsl_mc
+ phy_interface_t if_mode;
+ int err = 0;
+
+- /* just being completely paranoid */
+- if (!mc_dev)
+- return -EFAULT;
+ dev = &mc_dev->dev;
+
+ /* prepare a net_dev structure to make the phy lib API happy */
diff --git a/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch b/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch
new file mode 100644
index 0000000..ea58283
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7210-staging-fsl-dpaa2-mac-Fix-unregister_netdev-issue.patch
@@ -0,0 +1,42 @@
+From e74b6010eca026625ba4e39c80620320ca777deb Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+Date: Tue, 5 Apr 2016 13:35:14 +0300
+Subject: [PATCH 210/226] staging: fsl-dpaa2/mac: Fix unregister_netdev issue
+
+We only register the netdevice associated with a mac object if
+CONFIG_FSL_DPAA2_MAC_NETDEVS is set, but we always unregister it
+during device remove(). Fix this by ifdef-ing the unregister
+operation.
+
+Also ifdef the change in netdevice name as it only makes sense
+under this option.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+(cherry picked from commit dd6a5313e194168d46fef495a6e3bc5207801473)
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -534,7 +534,10 @@ static int dpaa2_mac_probe(struct fsl_mc
+ priv->netdev = netdev;
+
+ SET_NETDEV_DEV(netdev, dev);
++
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
++#endif
+
+ dev_set_drvdata(dev, priv);
+
+@@ -684,7 +687,9 @@ static int dpaa2_mac_remove(struct fsl_m
+ struct device *dev = &mc_dev->dev;
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
+
++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ unregister_netdev(priv->netdev);
++#endif
+ teardown_irqs(priv->mc_dev);
+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_dev->mc_io);
diff --git a/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch b/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch
new file mode 100644
index 0000000..46ecaea
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7211-staging-fsl-dpaa2-mac-Don-t-call-devm_free_irq.patch
@@ -0,0 +1,42 @@
+From b4d01330c66cbab3563c58f66f73f55726c09aec Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+Date: Tue, 5 Apr 2016 17:54:14 +0300
+Subject: [PATCH 211/226] staging: fsl-dpaa2/mac: Don't call devm_free_irq
+
+MAC interrupts are registered with devm_request_threaded_irq(), so
+there's no need to explicitly unregister them in case of a probe
+error or at device remove, as the kernel will take care of that for us.
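+
+A minimal sketch of the devm pattern this relies on (not the driver's
+exact code; error paths elided):
+
+	/* lifetime is tied to the struct device: the IRQ is released
+	 * automatically when the device is unbound or probe fails
+	 */
+	err = devm_request_threaded_irq(&mc_dev->dev,
+					mc_dev->irqs[0]->msi_desc->irq,
+					NULL, dpaa2_mac_irq_handler,
+					IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					dev_name(&mc_dev->dev), &mc_dev->dev);
+	if (err)
+		return err;
+	/* note: no matching devm_free_irq() in remove() or error paths */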
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+(cherry picked from commit 58e0fd23ade4b13e0a3c7e5f201802013e12df1c)
+(Stuart: resolved merge conflict)
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -429,13 +429,11 @@ static int setup_irqs(struct fsl_mc_devi
+ DPMAC_IRQ_INDEX, 1);
+ if (err) {
+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+- goto unregister_irq;
++ goto free_irq;
+ }
+
+ return 0;
+
+-unregister_irq:
+- devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev);
+ free_irq:
+ fsl_mc_free_irqs(mc_dev);
+
+@@ -456,7 +454,6 @@ static void teardown_irqs(struct fsl_mc_
+ if (err)
+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+
+- devm_free_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq, &mc_dev->dev);
+ fsl_mc_free_irqs(mc_dev);
+ }
+
diff --git a/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch b/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch
new file mode 100644
index 0000000..8ab6de9
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7212-staging-fsl-dpaa2-mac-Use-of_property_read_32.patch
@@ -0,0 +1,43 @@
+From e554a03fe11719db373be3c54ce8f230a98dd5e4 Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+Date: Wed, 6 Apr 2016 15:05:47 +0300
+Subject: [PATCH 212/226] staging: fsl-dpaa2/mac: Use of_property_read_32()
+
+Simplify reading of the dpmac id from device tree.
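+
+of_property_read_u32() performs the length check and be32-to-CPU
+conversion that the open-coded of_get_property() version did by hand;
+a minimal sketch of the resulting lookup loop:
+
+	u32 id;
+
+	while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
+		if (of_property_read_u32(dpmac, "reg", &id))
+			continue;	/* missing or malformed property */
+		if (id == dpmac_id)
+			return dpmac;
+	}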
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+(cherry picked from commit b0562bda063f95923bcd8b78dea84a6e0587d3da)
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -461,9 +461,8 @@ static struct device_node *lookup_node(s
+ {
+ struct device_node *dpmacs, *dpmac = NULL;
+ struct device_node *mc_node = dev->of_node;
+- const void *id;
+- int lenp;
+- int dpmac_id_be32 = cpu_to_be32(dpmac_id);
++ u32 id;
++ int err;
+
+ dpmacs = of_find_node_by_name(mc_node, "dpmacs");
+ if (!dpmacs) {
+@@ -472,12 +471,10 @@ static struct device_node *lookup_node(s
+ }
+
+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
+- id = of_get_property(dpmac, "reg", &lenp);
+- if (!id || lenp != sizeof(int)) {
+- dev_warn(dev, "Unsuitable reg property in dpmac node\n");
++ err = of_property_read_u32(dpmac, "reg", &id);
++ if (err)
+ continue;
+- }
+- if (*(int *)id == dpmac_id_be32)
++ if (id == dpmac_id)
+ return dpmac;
+ }
+
diff --git a/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch b/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch
new file mode 100644
index 0000000..2c7bb88
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7213-staging-fsl-dpaa2-mac-Remove-version-checks.patch
@@ -0,0 +1,61 @@
+From 3e4dc755337ca86d29c9f21f5225a77595aee032 Mon Sep 17 00:00:00 2001
+From: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+Date: Wed, 6 Apr 2016 12:12:06 +0300
+Subject: [PATCH 213/226] staging: fsl-dpaa2/mac: Remove version checks
+
+We intend to ensure backward compatibility with all MC versions
+going forward, so we no longer require an exact version match
+between the MAC driver's DPMAC API version and the DPMAC object
+version in MC firmware.
+
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu at nxp.com>
+(cherry picked from commit eafc210ef421fb0dca67b67bf1a2fe98cd060c31)
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 29 ++---------------------------
+ 1 file changed, 2 insertions(+), 27 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -481,30 +481,6 @@ static struct device_node *lookup_node(s
+ return NULL;
+ }
+
+-static int check_dpmac_version(struct dpaa2_mac_priv *priv)
+-{
+- struct device *dev = &priv->mc_dev->dev;
+- int mc_version = priv->attr.version.major;
+-
+- /* Check that the FLIB-defined version matches the one reported by MC */
+- if (mc_version != DPMAC_VER_MAJOR) {
+- dev_err(dev, "DPMAC FLIB version mismatch: MC says %d, we have %d\n",
+- mc_version, DPMAC_VER_MAJOR);
+- return -EINVAL;
+- }
+-
+- /* ... and that we actually support it */
+- if (mc_version < DPAA2_SUPPORTED_DPMAC_VERSION) {
+- dev_err(dev, "Unsupported DPMAC FLIB version (%d)\n",
+- mc_version);
+- return -EINVAL;
+- }
+-
+- dev_dbg(dev, "Using DPMAC FLIB version %d\n", mc_version);
+-
+- return 0;
+-}
+-
+ static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
+ {
+ struct device *dev;
+@@ -558,9 +534,8 @@ static int dpaa2_mac_probe(struct fsl_mc
+ goto err_close;
+ }
+
+- err = check_dpmac_version(priv);
+- if (err)
+- goto err_close;
++ dev_info_once(dev, "Using DPMAC API %d.%d\n",
++ priv->attr.version.major, priv->attr.version.minor);
+
+ /* Look up the DPMAC node in the device-tree. */
+ dpmac_node = lookup_node(dev, priv->attr.id);
diff --git a/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch
new file mode 100644
index 0000000..850d0e6
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7214-staging-fsl-dpaa2-mac-match-id-cleanup.patch
@@ -0,0 +1,26 @@
+From 137f5f17bad655024d18123b1be696ad6b9ec729 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Wed, 15 Jun 2016 14:04:32 -0500
+Subject: [PATCH 214/226] staging: fsl-dpaa2/mac: match id cleanup
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/mac/mac.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/mac/mac.c
++++ b/drivers/staging/fsl-dpaa2/mac/mac.c
+@@ -670,12 +670,10 @@ static int dpaa2_mac_remove(struct fsl_m
+ return 0;
+ }
+
+-static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = {
++static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmac",
+- .ver_major = DPMAC_VER_MAJOR,
+- .ver_minor = DPMAC_VER_MINOR,
+ },
+ {}
+ };
diff --git a/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch b/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch
new file mode 100644
index 0000000..344c2ae
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7215-dpaa2-evb-Added-Edge-Virtual-Bridge-driver.patch
@@ -0,0 +1,2918 @@
+From 54bcaca10728c1a1c8adfa48124ea79cce4ef929 Mon Sep 17 00:00:00 2001
+From: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Date: Tue, 22 Sep 2015 08:43:08 +0300
+Subject: [PATCH 215/226] dpaa2-evb: Added Edge Virtual Bridge driver
+
+This is a commit of the cumulative, squashed dpaa2-evb patches.
+All the commit logs are preserved below.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+----------------------------------------------------------------
+
+dpaa2-evb: Added Edge Virtual Bridge driver
+
+This contains the following patches migrated from linux-v4.0:
+staging: fsl-dpaa2: evb: Added Edge Virtual Bridge driver
+staging: fsl-dpaa2: evb: Added ethtool port counters
+staging: fsl-dpaa2: evb: Include by default in configuration
+staging: fsl-dpaa2: evb: Rebasing onto kernel 4.0
+staging: fsl-dpaa2: evb: Port to MC-0.7 FLibs
+dpaa2-evb: Set carrier state on port open
+dpaa2-evb: Add support for link state update
+dpaa2-evb: Update flib to MC 8.0.1
+staging: fsl-mc: migrated remaining flibs for MC fw 8.0.0 (split)
+
+Initial patches have been signed-off by:
+Alex Marginean <alexandru.marginean at freescale.com>
+J. German Rivera <German.Rivera at freescale.com>
+Bogdan Hamciuc <bogdan.hamciuc at freescale.com>
+Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+And reviewed by:
+Stuart Yoder <stuart.yoder at freescale.com>
+
+Porting to linux-v4.1 requires changes related to iflink usage and
+the ndo_bridge_getlink() parameter list.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-evb: Port to linux-v4.1
+
+Update iflink usage.
+Update evb_getlink() parameter list to match ndo_bridge_getlink().
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-evb: Add VLAN_8021Q dependency
+
+EVB traffic steering methods related to VLAN require VLAN support in the kernel.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-evb: Update dpdmux binary interface to 5.0
+
+This corresponds to MC release 0.8.0.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-evb: Add support to set max frame length.
+
+All packets bigger than max_frame_length will be dropped.
+
+Signed-off-by: Mihaela Panescu <mihaela.panescu at freescale.com>
+
+dpaa2-evb: resolve compile issues on uprev to 4.5
+
+-irq_number field no longer exists in fsl-mc interrupt
+ struct
+-netdev_master_upper_dev_link() has 2 new parameters, which
+ are set to NULL for now (see the sketch below)
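+
+A minimal sketch of the updated call ('port_netdev' and 'evb_netdev'
+are illustrative names, not the driver's exact variables):
+
+	err = netdev_master_upper_dev_link(port_netdev, evb_netdev,
+					   NULL, NULL);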
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ MAINTAINERS | 6 +
+ drivers/staging/fsl-dpaa2/Kconfig | 1 +
+ drivers/staging/fsl-dpaa2/Makefile | 1 +
+ drivers/staging/fsl-dpaa2/evb/Kconfig | 8 +
+ drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 256 ++++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 567 +++++++++++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 724 +++++++++++++++++
+ drivers/staging/fsl-dpaa2/evb/evb.c | 1216 ++++++++++++++++++++++++++++
+ 9 files changed, 2789 insertions(+)
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
+ create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4560,6 +4560,12 @@ L: linux-kernel at vger.kernel.org
+ S: Maintained
+ F: drivers/staging/fsl-dpaa2/mac/
+
++FREESCALE DPAA2 EDGE VIRTUAL BRIDGE DRIVER
++M: Alex Marginean <Alexandru.Marginean at freescale.com>
++L: linux-kernel at vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/evb/
++
+ FREEVXFS FILESYSTEM
+ M: Christoph Hellwig <hch at infradead.org>
+ W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
+--- a/drivers/staging/fsl-dpaa2/Kconfig
++++ b/drivers/staging/fsl-dpaa2/Kconfig
+@@ -10,3 +10,4 @@ config FSL_DPAA2
+ # TODO move DPIO driver in-here?
+ source "drivers/staging/fsl-dpaa2/ethernet/Kconfig"
+ source "drivers/staging/fsl-dpaa2/mac/Kconfig"
++source "drivers/staging/fsl-dpaa2/evb/Kconfig"
+--- a/drivers/staging/fsl-dpaa2/Makefile
++++ b/drivers/staging/fsl-dpaa2/Makefile
+@@ -4,3 +4,4 @@
+
+ obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+ obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
++obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
+@@ -0,0 +1,8 @@
++config FSL_DPAA2_EVB
++ tristate "DPAA2 Edge Virtual Bridge"
++ depends on FSL_MC_BUS && FSL_DPAA2 && FSL_DPAA2_ETH
++ select FSL_DPAA2_MAC
++ select VLAN_8021Q
++ default y
++ ---help---
++ Prototype driver for DPAA2 Edge Virtual Bridge.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
++
++dpaa2-evb-objs := evb.o dpdmux.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+@@ -0,0 +1,256 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDMUX_CMD_H
++#define _FSL_DPDMUX_CMD_H
++
++/* DPDMUX Version */
++#define DPDMUX_VER_MAJOR 5
++#define DPDMUX_VER_MINOR 0
++
++/* Command IDs */
++#define DPDMUX_CMDID_CLOSE 0x800
++#define DPDMUX_CMDID_OPEN 0x806
++#define DPDMUX_CMDID_CREATE 0x906
++#define DPDMUX_CMDID_DESTROY 0x900
++
++#define DPDMUX_CMDID_ENABLE 0x002
++#define DPDMUX_CMDID_DISABLE 0x003
++#define DPDMUX_CMDID_GET_ATTR 0x004
++#define DPDMUX_CMDID_RESET 0x005
++#define DPDMUX_CMDID_IS_ENABLED 0x006
++
++#define DPDMUX_CMDID_SET_IRQ 0x010
++#define DPDMUX_CMDID_GET_IRQ 0x011
++#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012
++#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013
++#define DPDMUX_CMDID_SET_IRQ_MASK 0x014
++#define DPDMUX_CMDID_GET_IRQ_MASK 0x015
++#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016
++#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1
++
++#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3
++
++#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7
++#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8
++
++#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0
++#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1
++#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2
++#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3
++#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\
++ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++#define DPDMUX_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\
++ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
++ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\
++ MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_unaccepted_frames_action, \
++ cfg->unaccept_act);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \
++ attr->accept_frame_type);\
++ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\
++} while (0)
++
++#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
++} while (0)
++
++#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++#endif /* _FSL_DPDMUX_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
+@@ -0,0 +1,567 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpdmux.h"
++#include "dpdmux-cmd.h"
++
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmux_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDMUX_CMD_OPEN(cmd, dpdmux_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPDMUX_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmux_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmux_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmux_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_accepted_frames *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpdmux_counter_type counter_type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
++
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
+@@ -0,0 +1,724 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMUX_H
++#define __FSL_DPDMUX_H
++
++#include "../../fsl-mc/include/net.h"
++
++struct fsl_mc_io;
++
++/* Data Path Demux API
++ * Contains API for handling DPDMUX topology and functionality
++ */
++
++/**
++ * dpdmux_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmux_id: DPDMUX unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmux_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmux_id,
++ uint16_t *token);
++
++/**
++ * dpdmux_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPDMUX general options
++ */
++
++/**
++ * Enable bridging between internal interfaces
++ */
++#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
++
++#define DPDMUX_IRQ_INDEX_IF 0x0000
++#define DPDMUX_IRQ_INDEX 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * enum dpdmux_manip - DPDMUX manipulation operations
++ * @DPDMUX_MANIP_NONE: No manipulation on frames
++ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
++ */
++enum dpdmux_manip {
++ DPDMUX_MANIP_NONE = 0x0,
++ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
++};
++
++/**
++ * enum dpdmux_method - DPDMUX method options
++ * @DPDMUX_METHOD_NONE: no DPDMUX method
++ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
++ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
++ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
++ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
++ */
++enum dpdmux_method {
++ DPDMUX_METHOD_NONE = 0x0,
++ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
++ DPDMUX_METHOD_MAC = 0x2,
++ DPDMUX_METHOD_C_VLAN = 0x3,
++ DPDMUX_METHOD_S_VLAN = 0x4
++};
++
++/**
++ * struct dpdmux_cfg - DPDMUX configuration parameters
++ * @method: Defines the operation method for the DPDMUX address table
++ * @manip: Required manipulation operation
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpdmux_cfg {
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ uint16_t num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
++ * @max_dmat_entries: Maximum entries in DPDMUX address table
++ * 0 - indicates default: 64 entries per interface.
++ * @max_mc_groups: Number of multicast groups in DPDMUX table
++ * 0 - indicates default: 32 multicast groups
++ * @max_vlan_ids: Maximum VLAN IDs allowed in the system -
++ * relevant only when working in the MAC+VLAN method.
++ * 0 - indicates default: 16 VLAN IDs.
++ */
++ struct {
++ uint64_t options;
++ uint16_t max_dmat_entries;
++ uint16_t max_mc_groups;
++ uint16_t max_vlan_ids;
++ } adv;
++};
++
++/**
++ * dpdmux_create() - Create the DPDMUX object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPDMUX object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpdmux_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ uint16_t *token);
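++
++/*
++ * Example (illustrative sketch only): creating a DPDMUX that classifies
++ * on C-VLAN and MAC, with four downlink interfaces and all advanced
++ * parameters left at their defaults.
++ *
++ *	struct dpdmux_cfg cfg = {
++ *		.method = DPDMUX_METHOD_C_VLAN_MAC,
++ *		.manip = DPDMUX_MANIP_NONE,
++ *		.num_ifs = 4,
++ *	};
++ *	uint16_t token;
++ *
++ *	err = dpdmux_create(mc_io, 0, &cfg, &token);
++ */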
++
++/**
++ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_enable() - Enable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_disable() - Disable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmux_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpdmux_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmux_irq_cfg *irq_cfg);
++
++/**
++ * dpdmux_get_irq() - Get IRQ information from the DPDMUX.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmux_irq_cfg *irq_cfg);
++
++/**
++ * dpdmux_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, no causes will raise
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpdmux_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpdmux_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpdmux_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
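++
++/*
++ * Example (illustrative sketch only): the usual service pattern reads
++ * the status, handles the causes of interest and clears the handled
++ * bits (write-one-to-clear).
++ *
++ *	uint32_t status = 0;
++ *
++ *	err = dpdmux_get_irq_status(mc_io, 0, token,
++ *				    DPDMUX_IRQ_INDEX_IF, &status);
++ *	if (!err && (status & DPDMUX_IRQ_EVENT_LINK_CHANGED)) {
++ *		... handle the link change ...
++ *		dpdmux_clear_irq_status(mc_io, 0, token,
++ *					DPDMUX_IRQ_INDEX_IF,
++ *					DPDMUX_IRQ_EVENT_LINK_CHANGED);
++ *	}
++ */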
++
++/**
++ * struct dpdmux_attr - Structure representing DPDMUX attributes
++ * @id: DPDMUX object ID
++ * @version: DPDMUX version
++ * @options: Configuration options (bitmap)
++ * @method: DPDMUX address table method
++ * @manip: DPDMUX manipulation type
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @mem_size: DPDMUX frame storage memory size
++ */
++struct dpdmux_attr {
++ int id;
++ /**
++ * struct version - DPDMUX version
++ * @major: DPDMUX major version
++ * @minor: DPDMUX minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t options;
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ uint16_t num_ifs;
++ uint16_t mem_size;
++};
++
++/**
++ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmux_attr *attr);
++
++/**
++ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @max_frame_length: The required maximum frame length
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length);
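++
++/*
++ * Example (illustrative sketch only): the maximum frame length is set
++ * on the uplink and applies to the whole mux; a driver would typically
++ * derive it from a requested MTU.
++ *
++ *	err = dpdmux_ul_set_max_frame_length(mc_io, 0, token,
++ *					     (uint16_t)new_mtu);
++ */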
++
++/**
++ * enum dpdmux_counter_type - Counter types
++ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
++ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
++ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
++ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
++ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ */
++enum dpdmux_counter_type {
++ DPDMUX_CNT_ING_FRAME = 0x0,
++ DPDMUX_CNT_ING_BYTE = 0x1,
++ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
++ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
++ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
++ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
++ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
++ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
++ DPDMUX_CNT_EGR_FRAME = 0x8,
++ DPDMUX_CNT_EGR_BYTE = 0x9,
++ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
++};
++
++/**
++ * enum dpdmux_accepted_frames_type - DPDMUX frame types
++ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority-tagged frames
++ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * priority-tagged frames that are received on this
++ * interface
++ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
++ * received on this interface are accepted
++ */
++enum dpdmux_accepted_frames_type {
++ DPDMUX_ADMIT_ALL = 0,
++ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
++ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
++};
++
++/**
++ * enum dpdmux_action - DPDMUX action for un-accepted frames
++ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
++ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
++ * control interface
++ */
++enum dpdmux_action {
++ DPDMUX_ACTION_DROP = 0,
++ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
++};
++
++/**
++ * struct dpdmux_accepted_frames - Frame types configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: Defines action on frames not accepted
++ */
++struct dpdmux_accepted_frames {
++ enum dpdmux_accepted_frames_type type;
++ enum dpdmux_action unaccept_act;
++};
++
++/**
++ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @cfg: Frame types configuration
++ *
++ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
++ * priority-tagged frames are discarded.
++ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
++ * priority-tagged frames are accepted.
++ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
++ * untagged and priority-tagged frames are accepted.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_accepted_frames *cfg);
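++
++/*
++ * Example (illustrative sketch only): admitting only VLAN-tagged frames
++ * on downlink interface 1 and dropping everything else.
++ *
++ *	struct dpdmux_accepted_frames frames_cfg = {
++ *		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
++ *		.unaccept_act = DPDMUX_ACTION_DROP,
++ *	};
++ *
++ *	err = dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &frames_cfg);
++ */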
++
++/**
++ * struct dpdmux_if_attr - Structure representing frame types configuration
++ * @rate: Configured interface rate (in bits per second)
++ * @enabled: Indicates if interface is enabled
++ * @accept_frame_type: Indicates type of accepted frames for the interface
++ */
++struct dpdmux_if_attr {
++ uint32_t rate;
++ int enabled;
++ enum dpdmux_accepted_frames_type accept_frame_type;
++};
++
++/**
++ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @attr: Interface attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_if_attr *attr);
++
++/**
++ * struct dpdmux_l2_rule - Structure representing L2 rule
++ * @mac_addr: MAC address
++ * @vlan_id: VLAN ID
++ */
++struct dpdmux_l2_rule {
++ uint8_t mac_addr[6];
++ uint16_t vlan_id;
++};
++
++/**
++ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function removes an L2 rule from the DPDMUX table,
++ * or removes an interface from an existing multicast address.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule);
++
++/**
++ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function adds an L2 rule into the DPDMUX table,
++ * or adds an interface to an existing multicast address.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule);
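++
++/*
++ * Example (illustrative sketch only): steering a unicast address on
++ * VLAN 100 to downlink interface 1; the MAC address is arbitrary.
++ *
++ *	struct dpdmux_l2_rule rule = {
++ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
++ *		.vlan_id = 100,
++ *	};
++ *
++ *	err = dpdmux_if_add_l2_rule(mc_io, 0, token, 1, &rule);
++ */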
++
++/**
++ * dpdmux_if_get_counter() - Function obtains a specific counter of an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID
++ * @counter_type: Counter type
++ * @counter: Returned specific counter information
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpdmux_counter_type counter_type,
++ uint64_t *counter);
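++
++/*
++ * Example (illustrative sketch only): reading the ingress frame count
++ * of interface 'if_id'.
++ *
++ *	uint64_t frames;
++ *
++ *	err = dpdmux_if_get_counter(mc_io, 0, token, if_id,
++ *				    DPDMUX_CNT_ING_FRAME, &frames);
++ */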
++
++/**
++ * dpdmux_ul_reset_counters() - Function resets the uplink counters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ */
++struct dpdmux_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpdmux_if_set_link_cfg() - Set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_cfg *cfg);
++
++/**
++ * struct dpdmux_link_state - Structure representing DPDMUX link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ * @up: 0 - down, 1 - up
++ */
++struct dpdmux_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpdmux_if_get_link_state() - Return the link state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID
++ * @state: Returned link state
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_state *state);
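++
++/*
++ * Example (illustrative sketch only): synchronizing carrier state from
++ * the hardware link state, as a driver might do after a link-change
++ * interrupt.
++ *
++ *	struct dpdmux_link_state state;
++ *
++ *	err = dpdmux_if_get_link_state(mc_io, 0, token, if_id, &state);
++ *	if (!err)
++ *		... report state.up to the network stack ...
++ */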
++
++#endif /* __FSL_DPDMUX_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -0,0 +1,1216 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include "../../fsl-mc/include/mc.h"
++
++#include "dpdmux.h"
++#include "dpdmux-cmd.h"
++
++/* IRQ index */
++#define DPDMUX_MAX_IRQ_NUM 2
++
++/* MAX FRAME LENGTH (currently 10k) */
++#define EVB_MAX_FRAME_LENGTH (10 * 1024)
++/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
++#define EVB_MIN_FRAME_LENGTH 68
++
++struct evb_port_priv {
++ struct net_device *netdev;
++ struct list_head list;
++ u16 port_index;
++ struct evb_priv *evb_priv;
++ u8 vlans[VLAN_VID_MASK+1];
++};
++
++struct evb_priv {
++ /* keep first */
++ struct evb_port_priv uplink;
++
++ struct fsl_mc_io *mc_io;
++ struct list_head port_list;
++ struct dpdmux_attr attr;
++ uint16_t mux_handle;
++ int dev_id;
++};
++
++static int _evb_port_carrier_state_sync(struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_link_state state;
++ int err;
++
++ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &state);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
++ return err;
++ }
++
++ WARN_ONCE(state.up > 1, "Garbage read into link_state");
++
++ if (state.up)
++ netif_carrier_on(port_priv->netdev);
++ else
++ netif_carrier_off(port_priv->netdev);
++
++ return 0;
++}
++
++static int evb_port_open(struct net_device *netdev)
++{
++ int err;
++
++ /* FIXME: enable port when support added */
++
++ err = _evb_port_carrier_state_sync(netdev);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_carrier_state_sync err %d\n",
++ err);
++ return err;
++ }
++
++ return 0;
++}
++
++static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int evb_links_state_update(struct evb_priv *priv)
++{
++ struct evb_port_priv *port_priv;
++ struct list_head *pos;
++ int err;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++
++ err = _evb_port_carrier_state_sync(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "_evb_port_carrier_state_sync err %d\n",
++ err);
++ }
++
++ return 0;
++}
++
++static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ struct fsl_mc_io *io = priv->mc_io;
++ uint16_t token = priv->mux_handle;
++ int irq_index = DPDMUX_IRQ_INDEX_IF;
++ uint32_t status = 0, clear = 0;
++ int err;
++
++ /* Sanity check */
++ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
++ goto out;
++ if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != irq_num))
++ goto out;
++
++ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
++ if (unlikely(err)) {
++ netdev_err(netdev, "Can't get irq status (err %d)", err);
++ clear = 0xffffffff;
++ goto out;
++ }
++
++ /* FIXME clear irq status */
++
++ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
++ clear |= DPDMUX_IRQ_EVENT_LINK_CHANGED;
++
++ err = evb_links_state_update(priv);
++ if (unlikely(err))
++ goto out;
++ }
++out:
++ err = dpdmux_clear_irq_status(io, 0, token, irq_index, clear);
++ if (unlikely(err))
++ netdev_err(netdev, "Can't clear irq status (err %d)", err);
++ return IRQ_HANDLED;
++}
++
++static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++ const int irq_index = DPDMUX_IRQ_INDEX_IF;
++ uint32_t mask = ~0x0u; /* FIXME: unmask handled irqs */
++
++ err = fsl_mc_allocate_irqs(evb_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
++ }
++
++ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
++
++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ irq_index, 0);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ irq = evb_dev->irqs[irq_index];
++
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ evb_irq0_handler,
++ _evb_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (unlikely(err)) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
++
++ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
++ irq_index, mask);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
++
++ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ irq_index, 1);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
++ goto free_devm_irq;
++ }
++
++ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(evb_dev);
++ return err;
++}
++
++static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++
++ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
++ DPDMUX_IRQ_INDEX_IF, 0);
++
++ devm_free_irq(dev,
++ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
++ dev);
++ fsl_mc_free_irqs(evb_dev);
++}
++
++static int evb_port_add_rule(struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_l2_rule rule = { .vlan_id = vid };
++ int err;
++
++ if (addr)
++ ether_addr_copy(rule.mac_addr, addr);
++
++ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &rule);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
++ return err;
++}
++
++static int evb_port_del_rule(struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct dpdmux_l2_rule rule = { .vlan_id = vid };
++ int err;
++
++ if (addr)
++ ether_addr_copy(rule.mac_addr, addr);
++
++ err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index, &rule);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
++ return err;
++}
++
++static bool _lookup_address(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct netdev_hw_addr *ha;
++ struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
++ &netdev->uc : &netdev->mc;
++
++ netif_addr_lock_bh(netdev);
++ list_for_each_entry(ha, &list->list, list) {
++ if (ether_addr_equal(ha->addr, addr)) {
++ netif_addr_unlock_bh(netdev);
++ return true;
++ }
++ }
++ netif_addr_unlock_bh(netdev);
++ return false;
++}
++
++static inline int evb_port_fdb_prep(struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 *vid,
++ bool del)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++
++ *vid = 0;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
++ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
++ netdev_err(netdev,
++ "EVB mode does not support MAC classification\n");
++ return -EOPNOTSUPP;
++ }
++
++ /* check if the address is configured on this port */
++ if (_lookup_address(netdev, addr)) {
++ if (!del)
++ return -EEXIST;
++ } else {
++ if (del)
++ return -ENOENT;
++ }
++
++ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
++ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
++ netdev_err(netdev, "invalid vlan size %d\n",
++ nla_len(tb[NDA_VLAN]));
++ return -EINVAL;
++ }
++
++ *vid = nla_get_u16(tb[NDA_VLAN]);
++
++ if (!*vid || *vid >= VLAN_VID_MASK) {
++ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
++ return -EINVAL;
++ }
++ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
++ netdev_err(netdev,
++ "EVB mode requires explicit VLAN configuration\n");
++ return -EINVAL;
++ } else if (tb[NDA_VLAN]) {
++ netdev_warn(netdev, "VLAN not supported, argument ignored\n");
++ }
++
++ return 0;
++}
++
++static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid, u16 flags)
++{
++ u16 _vid;
++ int err;
++
++ /* TODO: add replace support when added to iproute bridge */
++ if (!(flags & NLM_F_REQUEST)) {
++ netdev_err(netdev,
++ "evb_port_fdb_add unexpected flags value %08x\n",
++ flags);
++ return -EINVAL;
++ }
++
++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
++ if (unlikely(err))
++ return err;
++
++ err = evb_port_add_rule(netdev, addr, _vid);
++ if (unlikely(err))
++ return err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = dev_uc_add(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_uc_add err %d\n", err);
++ return err;
++ }
++ } else {
++ err = dev_mc_add(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_mc_add err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ u16 _vid;
++ int err;
++
++ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
++ if (unlikely(err))
++ return err;
++
++ err = evb_port_del_rule(netdev, addr, _vid);
++ if (unlikely(err))
++ return err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = dev_uc_del(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_uc_del err %d\n", err);
++ return err;
++ }
++ } else {
++ err = dev_mc_del(netdev, addr);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dev_mc_del err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int evb_change_mtu(struct net_device *netdev,
++ int mtu)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct list_head *pos;
++ int err = 0;
++
++ /* This operation is not permitted on downlinks */
++ if (port_priv->port_index > 0)
++ return -EPERM;
++
++ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
++ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
++ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
++ return -EINVAL;
++ }
++
++ err = dpdmux_ul_set_max_frame_length(evb_priv->mc_io,
++ 0,
++ evb_priv->mux_handle,
++ (uint16_t)mtu);
++
++ if (unlikely(err)) {
++ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
++ err);
++ return err;
++ }
++
++ /* Update the max frame length for downlinks */
++ list_for_each(pos, &evb_priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++ port_priv->netdev->mtu = mtu;
++ }
++
++ netdev->mtu = mtu;
++ return 0;
++}
++
++static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
++ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
++ .len = sizeof(struct bridge_vlan_info), },
++};
++
++static int evb_setlink_af_spec(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct bridge_vlan_info *vinfo;
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
++ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
++ return -EOPNOTSUPP;
++ }
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ err = evb_port_add_rule(netdev, NULL, vinfo->vid);
++ if (unlikely(err))
++ return err;
++
++ port_priv->vlans[vinfo->vid] = 1;
++
++ return 0;
++}
++
++static int evb_setlink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct nlattr *attr;
++ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX ?
++ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
++ int err = 0;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
++ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
++ netdev_err(netdev,
++ "EVB mode does not support VLAN only classification\n");
++ return -EOPNOTSUPP;
++ }
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
++ ifla_br_policy);
++ if (unlikely(err)) {
++ netdev_err(netdev,
++ "nla_parse_nested for br_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = evb_setlink_af_spec(netdev, tb);
++ return err;
++ }
++
++ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
++ return -EOPNOTSUPP;
++}
++
++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ u8 operstate = netif_running(netdev) ?
++ netdev->operstate : IF_OPER_DOWN;
++ int iflink;
++ int err;
++
++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
++ if (unlikely(err))
++ goto nla_put_err;
++ if (netdev->addr_len) {
++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
++ netdev->dev_addr);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ iflink = dev_get_iflink(netdev);
++ if (netdev->ifindex != iflink) {
++ err = nla_put_u32(skb, IFLA_LINK, iflink);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ return err;
++}
++
++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct nlattr *nest;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed\n");
++ return -ENOMEM;
++ }
++
++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
++ if (unlikely(err))
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
++ if (unlikely(err))
++ goto nla_put_err;
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct nlattr *nest;
++ struct bridge_vlan_info vinfo;
++ const u8 *vlans = port_priv->vlans;
++ u16 i;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_AF_SPEC);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < VLAN_VID_MASK+1; i++) {
++ if (!vlans[i])
++ continue;
++
++ vinfo.flags = 0;
++ vinfo.vid = i;
++
++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
++ sizeof(vinfo), &vinfo);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *netdev, u32 filter_mask, int nlflags)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ struct evb_priv *evb_priv = port_priv->evb_priv;
++ struct ifinfomsg *hdr;
++ struct nlmsghdr *nlh;
++ int err;
++
++ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
++ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
++ return 0;
++ }
++
++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
++ if (!nlh)
++ return -EMSGSIZE;
++
++ hdr = nlmsg_data(nlh);
++ memset(hdr, 0, sizeof(*hdr));
++ hdr->ifi_family = AF_BRIDGE;
++ hdr->ifi_type = netdev->type;
++ hdr->ifi_index = netdev->ifindex;
++ hdr->ifi_flags = dev_get_flags(netdev);
++
++ err = __nla_put_netdev(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++
++ err = __nla_put_port(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++
++ /* Check if the VID information is requested */
++ if (filter_mask & RTEXT_FILTER_BRVLAN) {
++ err = __nla_put_vlan(skb, netdev);
++ if (unlikely(err))
++ goto nla_put_err;
++ }
++
++ nlmsg_end(skb, nlh);
++ return skb->len;
++
++nla_put_err:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
++static int evb_dellink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *tb[IFLA_BRIDGE_MAX+1];
++ struct nlattr *spec;
++ struct bridge_vlan_info *vinfo;
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
++
++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (!spec)
++ return 0;
++
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ if (unlikely(err))
++ return err;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO])
++ return -EOPNOTSUPP;
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ err = evb_port_del_rule(netdev, NULL, vinfo->vid);
++ if (unlikely(err)) {
++ netdev_err(netdev, "evb_port_del_rule err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vinfo->vid] = 0;
++
++ return 0;
++}
++
++static struct rtnl_link_stats64 *
++evb_port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (unlikely(err)) {
++ storage->rx_dropped = tmp;
++ goto error;
++ }
++ storage->rx_dropped += tmp;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_ING_MCAST_FRAME,
++ &storage->multicast);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (unlikely(err))
++ goto error;
++
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ DPDMUX_CNT_EGR_FRAME_DISCARD,
++ &storage->tx_dropped);
++ if (unlikely(err))
++ goto error;
++
++ return storage;
++
++error:
++ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++ return storage;
++}
++
++static const struct net_device_ops evb_port_ops = {
++ .ndo_open = &evb_port_open,
++
++ .ndo_start_xmit = &evb_dropframe,
++
++ .ndo_fdb_add = &evb_port_fdb_add,
++ .ndo_fdb_del = &evb_port_fdb_del,
++
++ .ndo_get_stats64 = &evb_port_get_stats,
++ .ndo_change_mtu = &evb_change_mtu,
++};
++
++static struct {
++ enum dpdmux_counter_type id;
++ char name[ETH_GSTRING_LEN];
++} evb_ethtool_counters[] = {
++ {DPDMUX_CNT_ING_FRAME, "rx frames"},
++ {DPDMUX_CNT_ING_BYTE, "rx bytes"},
++ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPDMUX_CNT_EGR_FRAME, "tx frames"},
++ {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
++ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++};
++
++static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(evb_ethtool_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void evb_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void evb_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct evb_port_priv *port_priv = netdev_priv(netdev);
++ int i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
++ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
++ 0,
++ port_priv->evb_priv->mux_handle,
++ port_priv->port_index,
++ evb_ethtool_counters[i].id,
++ &data[i]);
++ if (err)
++ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
++ evb_ethtool_counters[i].name, err);
++ }
++}
++
++static const struct ethtool_ops evb_port_ethtool_ops = {
++ .get_strings = &evb_ethtool_get_strings,
++ .get_ethtool_stats = &evb_ethtool_get_stats,
++ .get_sset_count = &evb_ethtool_get_sset_count,
++};
++
++static int evb_open(struct net_device *netdev)
++{
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++
++ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_enable err %d\n", err);
++
++ return err;
++}
++
++static int evb_close(struct net_device *netdev)
++{
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++
++ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ netdev_err(netdev, "dpdmux_disable err %d\n", err);
++
++ return err;
++}
++
++static const struct net_device_ops evb_ops = {
++ .ndo_start_xmit = &evb_dropframe,
++ .ndo_open = &evb_open,
++ .ndo_stop = &evb_close,
++
++ .ndo_bridge_setlink = &evb_setlink,
++ .ndo_bridge_getlink = &evb_getlink,
++ .ndo_bridge_dellink = &evb_dellink,
++
++ .ndo_get_stats64 = &evb_port_get_stats,
++ .ndo_change_mtu = &evb_change_mtu,
++};
++
++static int evb_takedown(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err;
++
++ err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err))
++ dev_warn(dev, "dpdmux_close err %d\n", err);
++
++ return 0;
++}
++
++static int evb_init(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ int err = 0;
++
++ priv->dev_id = evb_dev->obj_desc.id;
++
++ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_open err %d\n", err);
++ goto err_exit;
++ }
++ if (!priv->mux_handle) {
++ dev_err(dev, "dpdmux_open returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_exit;
++ }
++
++ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
++ &priv->attr);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_get_attributes err %d\n", err);
++ goto err_close;
++ }
++
++ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
++ if (unlikely(err)) {
++ dev_err(dev, "dpdmux_reset err %d\n", err);
++ goto err_close;
++ }
++
++ return 0;
++
++err_close:
++ dpdmux_close(priv->mc_io, 0, priv->mux_handle);
++err_exit:
++ return err;
++}
++
++static int evb_remove(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev = &evb_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct evb_priv *priv = netdev_priv(netdev);
++ struct evb_port_priv *port_priv;
++ struct list_head *pos;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct evb_port_priv, list);
++
++ rtnl_lock();
++ netdev_upper_dev_unlink(port_priv->netdev, netdev);
++ rtnl_unlock();
++
++ unregister_netdev(port_priv->netdev);
++ free_netdev(port_priv->netdev);
++ }
++
++ evb_teardown_irqs(evb_dev);
++
++ unregister_netdev(netdev);
++
++ evb_takedown(evb_dev);
++ fsl_mc_portal_free(priv->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return 0;
++}
++
++static int evb_probe(struct fsl_mc_device *evb_dev)
++{
++ struct device *dev;
++ struct evb_priv *priv = NULL;
++ struct net_device *netdev = NULL;
++ char port_name[IFNAMSIZ];
++ int i;
++ int err = 0;
++
++ dev = &evb_dev->dev;
++
++ /* register switch device, it's for management only - no I/O */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ return -ENOMEM;
++ }
++ netdev->netdev_ops = &evb_ops;
++
++ dev_set_drvdata(dev, netdev);
++
++ priv = netdev_priv(netdev);
++
++ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
++ if (unlikely(err)) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_netdev;
++ }
++ if (!priv->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_free_netdev;
++ }
++
++ err = evb_init(evb_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "evb init err %d\n", err);
++ goto err_free_cmdport;
++ }
++
++ INIT_LIST_HEAD(&priv->port_list);
++ netdev->flags |= IFF_PROMISC | IFF_MASTER;
++
++ dev_alloc_name(netdev, "evb%d");
++
++ /* register switch ports */
++ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
++
++ /* only register downlinks? */
++ for (i = 0; i < priv->attr.num_ifs + 1; i++) {
++ struct net_device *port_netdev;
++ struct evb_port_priv *port_priv;
++
++ if (i) {
++ port_netdev =
++ alloc_etherdev(sizeof(struct evb_port_priv));
++ if (!port_netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ goto err_takedown;
++ }
++
++ port_priv = netdev_priv(port_netdev);
++
++ port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
++
++ dev_alloc_name(port_netdev, port_name);
++ } else {
++ port_netdev = netdev;
++ port_priv = &priv->uplink;
++ }
++
++ port_priv->netdev = port_netdev;
++ port_priv->evb_priv = priv;
++ port_priv->port_index = i;
++
++ SET_NETDEV_DEV(port_netdev, dev);
++
++ if (i) {
++ port_netdev->netdev_ops = &evb_port_ops;
++
++ err = register_netdev(port_netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev err %d\n", err);
++ free_netdev(port_netdev);
++ goto err_takedown;
++ }
++
++ rtnl_lock();
++ err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL);
++ if (unlikely(err)) {
++ dev_err(dev, "netdev_master_upper_dev_link err %d\n",
++ err);
++ unregister_netdev(port_netdev);
++ free_netdev(port_netdev);
++ rtnl_unlock();
++ goto err_takedown;
++ }
++ rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
++ IFF_SLAVE, GFP_KERNEL);
++ rtnl_unlock();
++
++ list_add(&(port_priv->list), &(priv->port_list));
++ } else {
++ err = register_netdev(netdev);
++
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ goto err_takedown;
++ }
++ }
++
++ port_netdev->ethtool_ops = &evb_port_ethtool_ops;
++
++ /* ports are up from init */
++ rtnl_lock();
++ err = dev_open(port_netdev);
++ rtnl_unlock();
++ if (unlikely(err))
++ dev_warn(dev, "dev_open err %d\n", err);
++ }
++
++ /* setup irqs */
++ err = evb_setup_irqs(evb_dev);
++ if (unlikely(err)) {
++ dev_warn(dev, "evb_setup_irqs err %d\n", err);
++ goto err_takedown;
++ }
++
++ dev_info(dev, "probed evb device with %d ports\n",
++ priv->attr.num_ifs);
++ return 0;
++
++err_takedown:
++ evb_remove(evb_dev);
++err_free_cmdport:
++ fsl_mc_portal_free(priv->mc_io);
++err_free_netdev:
++ return err;
++}
++
++static const struct fsl_mc_device_match_id evb_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpdmux",
++ .ver_major = DPDMUX_VER_MAJOR,
++ .ver_minor = DPDMUX_VER_MINOR,
++ },
++ {}
++};
++
++static struct fsl_mc_driver evb_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = evb_probe,
++ .remove = evb_remove,
++ .match_id_table = evb_match_id_table,
++};
++
++module_fsl_mc_driver(evb_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
diff --git a/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch b/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch
new file mode 100644
index 0000000..d3d976a
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7216-dpaa2-evb-Fix-interrupt-handling.patch
@@ -0,0 +1,69 @@
+From 4efb592d8a931669df5df04bedcae8cbc85c3700 Mon Sep 17 00:00:00 2001
+From: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Date: Wed, 17 Feb 2016 16:31:01 +0200
+Subject: [PATCH 216/226] dpaa2-evb: Fix interrupt handling
+
+Mask only the events handled by the driver - DPDMUX_IRQ_EVENT_LINK_CHANGED.
+
+Use the clear-on-read mechanism for the interrupt status and avoid calling
+dpdmux_clear_irq_status(). The status pre-set contains the handled events (only
+link state change for the moment) and masks the upper 16 bits, as they are
+used to store the interface ID that generated the event.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+---
+ drivers/staging/fsl-dpaa2/evb/evb.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/evb/evb.c
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -151,7 +151,9 @@ static irqreturn_t _evb_irq0_handler_thr
+ struct fsl_mc_io *io = priv->mc_io;
+ uint16_t token = priv->mux_handle;
+ int irq_index = DPDMUX_IRQ_INDEX_IF;
+- uint32_t status = 0, clear = 0;
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ uint32_t status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
+ /* Sanity check */
+@@ -163,23 +165,21 @@ static irqreturn_t _evb_irq0_handler_thr
+ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
+ if (unlikely(err)) {
+ netdev_err(netdev, "Can't get irq status (err %d)", err);
+- clear = 0xffffffff;
++ err = dpdmux_clear_irq_status(io, 0, token, irq_index,
++ 0xFFFFFFFF);
++ if (unlikely(err))
++ netdev_err(netdev, "Can't clear irq status (err %d)",
++ err);
+ goto out;
+ }
+
+- /* FIXME clear irq status */
+-
+ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
+- clear |= DPDMUX_IRQ_EVENT_LINK_CHANGED;
+-
+ err = evb_links_state_update(priv);
+ if (unlikely(err))
+ goto out;
+ }
++
+ out:
+- err = dpdmux_clear_irq_status(io, 0, token, irq_index, clear);
+- if (unlikely(err))
+- netdev_err(netdev, "Can't clear irq status (err %d)", err);
+ return IRQ_HANDLED;
+ }
+
+@@ -191,7 +191,7 @@ static int evb_setup_irqs(struct fsl_mc_
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+ const int irq_index = DPDMUX_IRQ_INDEX_IF;
+- uint32_t mask = ~0x0u; /* FIXME: unmask handled irqs */
++ uint32_t mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
+
+ err = fsl_mc_allocate_irqs(evb_dev);
+ if (unlikely(err)) {
diff --git a/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch b/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch
new file mode 100644
index 0000000..13d61cf
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7217-dpaa2-evb-Add-object-version-check.patch
@@ -0,0 +1,43 @@
+From 213c59501bbd6da8c56e95f90f8a8c6af2682002 Mon Sep 17 00:00:00 2001
+From: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Date: Thu, 18 Feb 2016 10:54:40 +0200
+Subject: [PATCH 217/226] dpaa2-evb: Add object version check
+
+Abort probing if DPDMUX object version is smaller than required.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+---
+ drivers/staging/fsl-dpaa2/evb/evb.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/staging/fsl-dpaa2/evb/evb.c
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -44,6 +44,10 @@
+ #include "dpdmux.h"
+ #include "dpdmux-cmd.h"
+
++/* Minimal supported DPDMUX version */
++#define DPDMUX_MIN_VER_MAJOR 5
++#define DPDMUX_MIN_VER_MINOR 0
++
+ /* IRQ index */
+ #define DPDMUX_MAX_IRQ_NUM 2
+
+@@ -1004,6 +1008,17 @@ static int evb_init(struct fsl_mc_device
+ goto err_close;
+ }
+
++ /* Minimum supported DPDMUX version check */
++ if (priv->attr.version.major < DPDMUX_MIN_VER_MAJOR ||
++ (priv->attr.version.major == DPDMUX_MIN_VER_MAJOR &&
++ priv->attr.version.minor < DPDMUX_MIN_VER_MINOR)) {
++ dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
++ priv->attr.version.major, priv->attr.version.minor,
++ DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
++ err = -ENOTSUPP;
++ goto err_close;
++ }
++
+ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_reset err %d\n", err);
diff --git a/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch b/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch
new file mode 100644
index 0000000..54f77ab
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7218-dpaa2-evb-Cosmetic-cleanup.patch
@@ -0,0 +1,20 @@
+From 54d026dafa1f7d17758615736123917cc4f3f203 Mon Sep 17 00:00:00 2001
+From: Mihai Caraman <mihai.caraman at freescale.com>
+Date: Tue, 5 Apr 2016 14:12:10 +0000
+Subject: [PATCH 218/226] dpaa2-evb: Cosmetic cleanup
+
+Replace obsolete terms.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/evb/evb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-dpaa2/evb/evb.c
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -1228,4 +1228,4 @@ static struct fsl_mc_driver evb_drv = {
+ module_fsl_mc_driver(evb_drv);
+
+ MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
++MODULE_DESCRIPTION("DPAA2 Edge Virtual Bridge driver (prototype)");
diff --git a/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch
new file mode 100644
index 0000000..be1ab1b
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7219-dpaa2-evb-match-id-cleanup.patch
@@ -0,0 +1,26 @@
+From 744bd6494a51443c2a7d32ed76e94e4fc5bd2404 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Thu, 14 Jul 2016 17:32:23 -0500
+Subject: [PATCH 219/226] dpaa2-evb: match id cleanup
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/evb/evb.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/evb/evb.c
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -1205,12 +1205,10 @@ err_free_netdev:
+ return err;
+ }
+
+-static const struct fsl_mc_device_match_id evb_match_id_table[] = {
++static const struct fsl_mc_device_id evb_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmux",
+- .ver_major = DPDMUX_VER_MAJOR,
+- .ver_minor = DPDMUX_VER_MINOR,
+ },
+ {}
+ };
diff --git a/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch b/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch
new file mode 100644
index 0000000..067eb59
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7220-dpaa2-ethsw-Ethernet-Switch-driver.patch
@@ -0,0 +1,6605 @@
+From 8df017d70c54ceafc99b7904785603c678a2e5c1 Mon Sep 17 00:00:00 2001
+From: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+Date: Tue, 22 Sep 2015 11:36:34 +0300
+Subject: [PATCH 220/226] dpaa2-ethsw: Ethernet Switch driver
+
+This is a commit of the cumulative, squashed dpaa2-l2switch patches.
+All the commit logs are preserved below.
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+---------------------------------------------------------------------
+
+dpaa2-ethsw: Ethernet Switch driver
+
+Initial support for the DPAA2 L2 switch. The switch and all of its
+ports are presented as network interfaces in Linux (swX and swXpY). I/O
+functionality is not available on these interfaces; they are used
+exclusively for management.
+
+Configuration is done using the bridge tool. Supported commands are:
+- fdb operations with unicast/multicast addresses
+- vlan configuration
+- setting STP state of ports
+- flooding, learning control
+
+The driver also supports retrieving port statistics via ethtool (or
+similar applications).
+
+This patch contains the following patches squashed together:
+staging: fsl-dpaa2: ethsw: ethernet switch driver
+dpaa2-ethsw: Include by default in configuration
+staging: fsl-dpaa2: ethsw: Rebasing onto kernel 4.0
+staging: fsl-mc: migrated remaining flibs for MC fw 8.0.0
+dpaa2-ethsw: Prefix driver name with dpaa2-
+dpaa2-ethsw: Set carrier state on probe
+dpaa2-ethsw: Add support for link state update
+
+These patches were initially submitted by:
+Alex Marginean <alexandru.marginean at freescale.com>
+J. German Rivera <German.Rivera at freescale.com>
+Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+and reviewed by Stuart Yoder <stuart.yoder at freescale.com>
+
+Ported to linux-v4.1 by updating the iflink usage and the
+ndo_bridge_getlink() parameter list.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+[Stuart: resolved minor merge conflicts]
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+
+dpaa2-ethsw: Update dpsw binary interface to 7.0
+
+This corresponds to MC release 0.8.0.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-ethsw: Add object version check
+
+Abort probing if the DPSW object version is lower than required.
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-ethsw: Fix interrupt handling
+
+Mask only the events handled by the driver - DPSW_IRQ_EVENT_LINK_CHANGED.
+
+Use the clear-on-read mechanism for the interrupt status and avoid calling
+dpsw_clear_irq_status(). The status contains the handled events (only link
+state change for the moment); the first 16 bits are masked out of the event
+check, as they are used to store the interface ID that generated the event.
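+
+A minimal sketch of that handling (illustrative only; the helper name
+ethsw_links_state_update() and the exact bit layout are assumptions):
+
+	err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
+	/* status is clear-on-read, so no dpsw_clear_irq_status() call */
+	if_id = status & 0xFFFF;	/* interface that raised the event */
+	status &= ~0xFFFF;		/* mask out the interface ID bits */
+	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
+		ethsw_links_state_update(priv);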
+
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu at freescale.com>
+
+dpaa2-ethsw: resolve compile issues on uprev to 4.5
+
+- the irq_number field no longer exists in the fsl-mc interrupt
+  struct
+- netdev_master_upper_dev_link() has 2 new parameters, which
+  are set to NULL for now
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ MAINTAINERS | 6 +
+ drivers/staging/fsl-dpaa2/Kconfig | 1 +
+ drivers/staging/fsl-dpaa2/Makefile | 1 +
+ drivers/staging/fsl-dpaa2/ethsw/Kconfig | 7 +
+ drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 916 ++++++++++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1639 +++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 2164 ++++++++++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethsw/switch.c | 1711 ++++++++++++++++++++++
+ drivers/staging/fsl-mc/include/net.h | 1 -
+ 10 files changed, 6455 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4554,6 +4554,12 @@ S: Maintained
+ F: drivers/staging/fsl-mc/bus/mc-ioctl.h
+ F: drivers/staging/fsl-mc/bus/mc-restool.c
+
++FREESCALE DPAA2 ETHERNET SWITCH DRIVER
++M: Alex Marginean <Alexandru.Marginean at freescale.com>
++L: linux-kernel at vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/ethsw/
++
+ FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER
+ M: Alex Marginean <Alexandru.Marginean at freescale.com>
+ L: linux-kernel at vger.kernel.org
+--- a/drivers/staging/fsl-dpaa2/Kconfig
++++ b/drivers/staging/fsl-dpaa2/Kconfig
+@@ -11,3 +11,4 @@ config FSL_DPAA2
+ source "drivers/staging/fsl-dpaa2/ethernet/Kconfig"
+ source "drivers/staging/fsl-dpaa2/mac/Kconfig"
+ source "drivers/staging/fsl-dpaa2/evb/Kconfig"
++source "drivers/staging/fsl-dpaa2/ethsw/Kconfig"
+--- a/drivers/staging/fsl-dpaa2/Makefile
++++ b/drivers/staging/fsl-dpaa2/Makefile
+@@ -5,3 +5,4 @@
+ obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+ obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
+ obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
+@@ -0,0 +1,7 @@
++config FSL_DPAA2_ETHSW
++ tristate "DPAA2 Ethernet Switch"
++ depends on FSL_MC_BUS && FSL_DPAA2 && FSL_DPAA2_ETH
++ select FSL_DPAA2_MAC
++ default y
++ ---help---
++ Prototype driver for DPAA2 Ethernet Switch.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
++
++dpaa2-ethsw-objs := switch.o dpsw.o
++
++all:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+@@ -0,0 +1,916 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_CMD_H
++#define __FSL_DPSW_CMD_H
++
++/* DPSW Version */
++#define DPSW_VER_MAJOR 7
++#define DPSW_VER_MINOR 0
++
++/* Command IDs */
++#define DPSW_CMDID_CLOSE 0x800
++#define DPSW_CMDID_OPEN 0x802
++#define DPSW_CMDID_CREATE 0x902
++#define DPSW_CMDID_DESTROY 0x900
++
++#define DPSW_CMDID_ENABLE 0x002
++#define DPSW_CMDID_DISABLE 0x003
++#define DPSW_CMDID_GET_ATTR 0x004
++#define DPSW_CMDID_RESET 0x005
++#define DPSW_CMDID_IS_ENABLED 0x006
++
++#define DPSW_CMDID_SET_IRQ 0x010
++#define DPSW_CMDID_GET_IRQ 0x011
++#define DPSW_CMDID_SET_IRQ_ENABLE 0x012
++#define DPSW_CMDID_GET_IRQ_ENABLE 0x013
++#define DPSW_CMDID_SET_IRQ_MASK 0x014
++#define DPSW_CMDID_GET_IRQ_MASK 0x015
++#define DPSW_CMDID_GET_IRQ_STATUS 0x016
++#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPSW_CMDID_SET_REFLECTION_IF 0x022
++
++#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024
++
++#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026
++
++#define DPSW_CMDID_IF_SET_TCI 0x030
++#define DPSW_CMDID_IF_SET_STP 0x031
++#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032
++#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033
++#define DPSW_CMDID_IF_GET_COUNTER 0x034
++#define DPSW_CMDID_IF_SET_COUNTER 0x035
++#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036
++#define DPSW_CMDID_IF_ADD_REFLECTION 0x037
++#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038
++#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039
++#define DPSW_CMDID_IF_SET_METERING 0x03A
++#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B
++
++#define DPSW_CMDID_IF_ENABLE 0x03D
++#define DPSW_CMDID_IF_DISABLE 0x03E
++
++#define DPSW_CMDID_IF_GET_ATTR 0x042
++
++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044
++#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045
++#define DPSW_CMDID_IF_GET_LINK_STATE 0x046
++#define DPSW_CMDID_IF_SET_FLOODING 0x047
++#define DPSW_CMDID_IF_SET_BROADCAST 0x048
++#define DPSW_CMDID_IF_SET_MULTICAST 0x049
++#define DPSW_CMDID_IF_GET_TCI 0x04A
++
++#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C
++
++#define DPSW_CMDID_VLAN_ADD 0x060
++#define DPSW_CMDID_VLAN_ADD_IF 0x061
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062
++#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063
++#define DPSW_CMDID_VLAN_REMOVE_IF 0x064
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066
++#define DPSW_CMDID_VLAN_REMOVE 0x067
++#define DPSW_CMDID_VLAN_GET_IF 0x068
++#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069
++#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A
++#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B
++
++#define DPSW_CMDID_FDB_GET_MULTICAST 0x080
++#define DPSW_CMDID_FDB_GET_UNICAST 0x081
++#define DPSW_CMDID_FDB_ADD 0x082
++#define DPSW_CMDID_FDB_REMOVE 0x083
++#define DPSW_CMDID_FDB_ADD_UNICAST 0x084
++#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085
++#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088
++#define DPSW_CMDID_FDB_GET_ATTR 0x089
++
++#define DPSW_CMDID_ACL_ADD 0x090
++#define DPSW_CMDID_ACL_REMOVE 0x091
++#define DPSW_CMDID_ACL_ADD_ENTRY 0x092
++#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093
++#define DPSW_CMDID_ACL_ADD_IF 0x094
++#define DPSW_CMDID_ACL_REMOVE_IF 0x095
++#define DPSW_CMDID_ACL_GET_ATTR 0x096
++
++#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0
++#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1
++#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2
++#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3
++
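++/*
++ * Encoding convention for the macros below: each MC_CMD_OP() or
++ * MC_RSP_OP() line packs or unpacks a single field, placing 'width'
++ * bits of 'arg_name' at bit 'offset' within the 64-bit command
++ * parameter word selected by 'param'; the one-line comments above
++ * each macro restate that argument order.
++ */
++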
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_OPEN(cmd, dpsw_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \
++ cfg->adv.component_type);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\
++ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\
++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\
++ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\
++ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
++ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\
++ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \
++ attr->component_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\
++ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\
++ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \
++ cfg->priority_selector);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[0].mode);\
++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[1].mode);\
++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[2].mode);\
++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[3].mode);\
++ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
++ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[4].mode);\
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
++ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[5].mode);\
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
++ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[6].mode);\
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
++ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[7].mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_PREP_EARLY_DROP(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
++ MC_PREP_OP(ext, 0, 2, 2, \
++ enum dpsw_early_drop_unit, cfg->units); \
++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_EXT_EARLY_DROP(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
++ MC_EXT_OP(ext, 0, 2, 2, \
++ enum dpsw_early_drop_unit, cfg->units); \
++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_ENABLE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_DISABLE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \
++ attr->admit_untagged);\
++ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\
++ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \
++ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \
++ attr->learning_mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_ACL_ADD(cmd, acl_id) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_PREP_ACL_ENTRY(ext, key) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
++ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
++ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
++ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
++ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
++ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
++ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
++ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
++ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
++ MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
++ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
++ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
++ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
++ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
++ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
++ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
++ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
++ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
++ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
++ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
++ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
++ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
++ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
++ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
++ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
++ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
++ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
++ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
++ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
++ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
++ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
++ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
++ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
++ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
++ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
++ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
++ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_EXT_ACL_ENTRY(ext, key) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
++ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
++ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
++ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
++ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
++ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
++ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
++ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
++ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
++ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
++ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
++ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
++ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
++ MC_EXT_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
++ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
++ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
++ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
++ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
++ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
++ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
++ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
++ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
++ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
++ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
++ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
++ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
++ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
++ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
++ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
++ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
++ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
++ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
++ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
++ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
++ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
++ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
++ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
++} while (0)
++
++#endif /* __FSL_DPSW_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+@@ -0,0 +1,1639 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "../../fsl-mc/include/mc-sys.h"
++#include "../../fsl-mc/include/mc-cmd.h"
++#include "dpsw.h"
++#include "dpsw-cmd.h"
++
++/* internal functions */
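++/*
++ * Interface lists travel to and from the MC as a bitmap spread across
++ * consecutive 64-bit command parameters: interface i is bit (i % 64)
++ * of params[start_param + i / 64], so interface 5 sets bit 5 of the
++ * first parameter and interface 70 sets bit 6 of the second.
++ * build_if_id_bitmap() encodes such a list; read_if_id_bitmap()
++ * decodes one back into an array of interface IDs plus a count.
++ */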
++static void build_if_id_bitmap(const uint16_t *if_id,
++ const uint16_t num_ifs,
++ struct mc_command *cmd,
++ int start_param)
++{
++ int i;
++
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
++ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc(
++ (if_id[i] % 64), 1, 1);
++}
++
++static int read_if_id_bitmap(uint16_t *if_id,
++ uint16_t *num_ifs,
++ struct mc_command *cmd,
++ int start_param)
++{
++ int bitmap[DPSW_MAX_IF] = { 0 };
++ int i, j = 0;
++ int count = 0;
++
++ for (i = 0; i < DPSW_MAX_IF; i++) {
++ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64],
++ i % 64, 1);
++ count += bitmap[i];
++ }
++
++ *num_ifs = (uint16_t)count;
++
++ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
++ if (bitmap[i]) {
++ if_id[j] = (uint16_t)i;
++ j++;
++ }
++ }
++
++ return 0;
++}
++
++/* DPSW APIs */
++int dpsw_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpsw_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPSW_CMD_OPEN(cmd, dpsw_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpsw_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpsw_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPSW_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpsw_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
++
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_TCI(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_TCI(cmd, cfg);
++
++ return 0;
++}
++
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_stp_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_accepted_frames_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int accept_all)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
++
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t counter)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tx_selection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf)
++{
++ uint64_t *ext_params = (uint64_t *)early_drop_buf;
++
++ DPSW_PREP_EARLY_DROP(ext_params, cfg);
++}
++
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_ENABLE(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_DISABLE(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_ATTR(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t *frame_length)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length);
++
++ return 0;
++}
++
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *fdb_id,
++ const struct dpsw_fdb_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_ADD(cmd, *fdb_id);
++
++ return 0;
++}
++
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE(cmd, fdb_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg);
++
++ return 0;
++}
++
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2);
++
++ return 0;
++}
++
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ enum dpsw_fdb_learning_mode mode)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *acl_id,
++ const struct dpsw_acl_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_ACL_ADD(cmd, *acl_id);
++
++ return 0;
++}
++
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE(cmd, acl_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ uint8_t *entry_cfg_buf)
++{
++ uint64_t *ext_params = (uint64_t *)entry_cfg_buf;
++
++ DPSW_PREP_ACL_ENTRY(ext_params, key);
++}
++
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ struct dpsw_acl_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_ACL_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_ctrl_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_ctrl_if_pools_cfg *pools)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++ cmd_flags,
++ token);
++ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_ctrl_if_disable() - Disable the control interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+@@ -0,0 +1,2164 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_H
++#define __FSL_DPSW_H
++
++#include "../../fsl-mc/include/net.h"
++
++/* Data Path L2-Switch API
++ * Contains API for handling DPSW topology and functionality
++ */
++
++struct fsl_mc_io;
++
++/**
++ * DPSW general definitions
++ */
++
++/**
++ * Maximum number of traffic class priorities
++ */
++#define DPSW_MAX_PRIORITIES 8
++/**
++ * Maximum number of interfaces
++ */
++#define DPSW_MAX_IF 64
++
++/**
++ * dpsw_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpsw_id: DPSW unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpsw_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpsw_id,
++ uint16_t *token);
++
++/**
++ * dpsw_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
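++
++/*
++ * Illustrative usage sketch (not part of the MC API itself): a minimal
++ * control session, assuming an initialized 'mc_io' portal and a known
++ * 'dpsw_id':
++ *
++ *	uint16_t token;
++ *	int err = dpsw_open(mc_io, 0, dpsw_id, &token);
++ *
++ *	if (!err) {
++ *		... issue dpsw_xxx() commands using 'token' ...
++ *		dpsw_close(mc_io, 0, token);
++ *	}
++ */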
++
++/**
++ * DPSW options
++ */
++
++/**
++ * Disable flooding
++ */
++#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
++/**
++ * Disable Multicast
++ */
++#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
++/**
++ * Support control interface
++ */
++#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
++/**
++ * Disable flooding metering
++ */
++#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
++/**
++ * Enable metering
++ */
++#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
++
++/**
++ * enum dpsw_component_type - component type of a bridge
++ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
++ * enterprise VLAN bridge or of a Provider Bridge used
++ * to process C-tagged frames
++ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
++ * Provider Bridge
++ *
++ */
++enum dpsw_component_type {
++ DPSW_COMPONENT_TYPE_C_VLAN = 0,
++ DPSW_COMPONENT_TYPE_S_VLAN
++};
++
++/**
++ * struct dpsw_cfg - DPSW configuration
++ * @num_ifs: Number of external and internal interfaces
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpsw_cfg {
++ uint16_t num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: Enable/Disable DPSW features (bitmap)
++ * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
++ * @max_meters_per_if: Number of meters per interface
++ * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @component_type: Indicates the component type of this bridge
++ */
++ struct {
++ uint64_t options;
++ uint16_t max_vlans;
++ uint8_t max_meters_per_if;
++ uint8_t max_fdbs;
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ uint16_t max_fdb_mc_groups;
++ enum dpsw_component_type component_type;
++ } adv;
++};
++
++/**
++ * dpsw_create() - Create the DPSW object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPSW object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpsw_open() function to get an authentication
++ * token first
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpsw_cfg *cfg,
++ uint16_t *token);
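++
++/*
++ * Illustrative sketch (hypothetical values): create a two-interface
++ * switch with default advanced parameters, then enable it with
++ * dpsw_enable(), declared further below:
++ *
++ *	struct dpsw_cfg cfg = { .num_ifs = 2 };
++ *	uint16_t token;
++ *	int err;
++ *
++ *	err = dpsw_create(mc_io, 0, &cfg, &token);
++ *	if (!err)
++ *		err = dpsw_enable(mc_io, 0, token);
++ */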
++
++/**
++ * dpsw_destroy() - Destroy the DPSW object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpsw_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_enable() - Enable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_disable() - Disable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_is_enabled() - Check if the DPSW is enabled
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPSW IRQ Index and Events
++ */
++
++#define DPSW_IRQ_INDEX_IF 0x0000
++#define DPSW_IRQ_INDEX_L2SW 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * struct dpsw_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpsw_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpsw_irq_cfg *irq_cfg);
++
++/**
++ * dpsw_get_irq() - Get IRQ information from the DPSW
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg);
++
++/**
++ * dpsw_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpsw_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpsw_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpsw_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpsw_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpsw_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
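++
++/*
++ * Illustrative sketch: unmask the link-change cause, enable the
++ * interface IRQ, then later read and acknowledge a pending event:
++ *
++ *	uint32_t status = 0;
++ *
++ *	dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
++ *			  DPSW_IRQ_EVENT_LINK_CHANGED);
++ *	dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);
++ *
++ *	dpsw_get_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF, &status);
++ *	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
++ *		dpsw_clear_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
++ *				      DPSW_IRQ_EVENT_LINK_CHANGED);
++ */
++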
++/**
++ * struct dpsw_attr - Structure representing DPSW attributes
++ * @id: DPSW object ID
++ * @version: DPSW version
++ * @options: Enable/Disable DPSW features
++ * @max_vlans: Maximum number of VLANs
++ * @max_meters_per_if: Number of meters per interface
++ * @max_fdbs: Maximum number of FDBs
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @mem_size: DPSW frame storage memory size
++ * @num_ifs: Number of interfaces
++ * @num_vlans: Current number of VLANs
++ * @num_fdbs: Current number of FDBs
++ * @component_type: Component type of this bridge
++ */
++struct dpsw_attr {
++ int id;
++ /**
++ * struct version - DPSW version
++ * @major: DPSW major version
++ * @minor: DPSW minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t options;
++ uint16_t max_vlans;
++ uint8_t max_meters_per_if;
++ uint8_t max_fdbs;
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ uint16_t max_fdb_mc_groups;
++ uint16_t num_ifs;
++ uint16_t mem_size;
++ uint16_t num_vlans;
++ uint8_t num_fdbs;
++ enum dpsw_component_type component_type;
++};
++
++/**
++ * dpsw_get_attributes() - Retrieve DPSW attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_attr *attr);
++
++/**
++ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Id
++ *
++ * Only one reflection receive interface is allowed per switch
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * enum dpsw_action - Action selection for special/control frames
++ * @DPSW_ACTION_DROP: Drop frame
++ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
++ */
++enum dpsw_action {
++ DPSW_ACTION_DROP = 0,
++ DPSW_ACTION_REDIRECT = 1
++};
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpsw_link_cfg - Structure representing DPSW link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ */
++struct dpsw_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpsw_if_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: interface id
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_cfg *cfg);
++/**
++ * struct dpsw_link_state - Structure representing DPSW link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ * @up: 0 - covers two cases: down and disconnected, 1 - up
++ */
++struct dpsw_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpsw_if_get_link_state() - Return the link state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: interface id
++ * @state: Returned link state; 1 - link is up, 0 - link is down or disconnected
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_state *state);
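++
++/*
++ * Illustrative sketch (error handling elided): query the link state of
++ * interface 0 and act on it:
++ *
++ *	struct dpsw_link_state state = { 0 };
++ *
++ *	dpsw_if_get_link_state(mc_io, 0, token, 0, &state);
++ *	if (state.up)
++ *		... link is up at 'state.rate' ...
++ */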
++
++/**
++ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
++ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
++ * to the IEEE 802.1p priority
++ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
++ * separately or in conjunction with PCP to indicate frames
++ * eligible to be dropped in the presence of congestion
++ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
++ * to which the frame belongs. The hexadecimal values
++ * of 0x000 and 0xFFF are reserved;
++ * all other values may be used as VLAN identifiers,
++ * allowing up to 4,094 VLANs
++ */
++struct dpsw_tci_cfg {
++ uint8_t pcp;
++ uint8_t dei;
++ uint16_t vlan_id;
++};
++
++/**
++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tci_cfg *cfg);
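++
++/*
++ * Illustrative sketch (arbitrary example values): assign untagged
++ * traffic on interface 1 to VLAN 100 with priority 3:
++ *
++ *	const struct dpsw_tci_cfg tci = {
++ *		.pcp = 3,
++ *		.dei = 0,
++ *		.vlan_id = 100,
++ *	};
++ *
++ *	dpsw_if_set_tci(mc_io, 0, token, 1, &tci);
++ */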
++
++/**
++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_tci_cfg *cfg);
++
++/**
++ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
++ * @DPSW_STP_STATE_BLOCKING: Blocking state
++ * @DPSW_STP_STATE_LISTENING: Listening state
++ * @DPSW_STP_STATE_LEARNING: Learning state
++ * @DPSW_STP_STATE_FORWARDING: Forwarding state
++ *
++ */
++enum dpsw_stp_state {
++ DPSW_STP_STATE_BLOCKING = 0,
++ DPSW_STP_STATE_LISTENING = 1,
++ DPSW_STP_STATE_LEARNING = 2,
++ DPSW_STP_STATE_FORWARDING = 3
++};
++
++/**
++ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
++ * @vlan_id: VLAN ID STP state
++ * @state: STP state
++ */
++struct dpsw_stp_cfg {
++ uint16_t vlan_id;
++ enum dpsw_stp_state state;
++};
++
++/**
++ * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: STP State configuration parameters
++ *
++ * The following STP states are supported -
++ * blocking, listening, learning and forwarding.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_stp_cfg *cfg);
++
++/**
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * Priority-Tagged frames received on this interface.
++ *
++ */
++enum dpsw_accepted_frames {
++ DPSW_ADMIT_ALL = 1,
++ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
++};
++
++/**
++ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: When a frame is not accepted, it may be discarded or
++ * redirected to control interface depending on this mode
++ */
++struct dpsw_accepted_frames_cfg {
++ enum dpsw_accepted_frames type;
++ enum dpsw_action unaccept_act;
++};
++
++/**
++ * dpsw_if_set_accepted_frames() - Set the types of frames accepted on an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Frame types configuration
++ *
++ * When admit_only_vlan_tagged is set, the device discards untagged
++ * frames or priority-tagged frames received on this interface.
++ * When admit_only_untagged is set, untagged or priority-tagged
++ * frames received on this interface are accepted and assigned
++ * to a VID based on the PVID and VID set for this interface.
++ * When admit_all is set, the device accepts VLAN-tagged, untagged
++ * and priority-tagged frames.
++ * The default is admit_all.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_accepted_frames_cfg *cfg);
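++
++/*
++ * Illustrative sketch: admit only VLAN-tagged frames on interface 2 and
++ * redirect everything else to the control interface:
++ *
++ *	const struct dpsw_accepted_frames_cfg frames_cfg = {
++ *		.type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
++ *		.unaccept_act = DPSW_ACTION_REDIRECT,
++ *	};
++ *
++ *	dpsw_if_set_accepted_frames(mc_io, 0, token, 2, &frames_cfg);
++ */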
++
++/**
++ * dpsw_if_set_accept_all_vlan() - Accept or drop frames from VLANs that do not include this interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @accept_all: Accept or drop frames having different VLAN
++ *
++ * When accept_all is FALSE, the device discards incoming frames
++ * for VLANs that do not include this interface in their member
++ * set. When accept_all is TRUE, the interface accepts all incoming frames.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int accept_all);
++
++/**
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
++ */
++enum dpsw_counter {
++ DPSW_CNT_ING_FRAME = 0x0,
++ DPSW_CNT_ING_BYTE = 0x1,
++ DPSW_CNT_ING_FLTR_FRAME = 0x2,
++ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++ DPSW_CNT_ING_MCAST_FRAME = 0x4,
++ DPSW_CNT_ING_MCAST_BYTE = 0x5,
++ DPSW_CNT_ING_BCAST_FRAME = 0x6,
++ DPSW_CNT_ING_BCAST_BYTES = 0x7,
++ DPSW_CNT_EGR_FRAME = 0x8,
++ DPSW_CNT_EGR_BYTE = 0x9,
++ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
++};
++
++/**
++ * dpsw_if_get_counter() - Get specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: return value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t *counter);
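++
++/*
++ * Illustrative sketch (error handling elided): read the ingress frame
++ * counter of interface 0, then reset it:
++ *
++ *	uint64_t frames = 0;
++ *
++ *	dpsw_if_get_counter(mc_io, 0, token, 0, DPSW_CNT_ING_FRAME, &frames);
++ *	dpsw_if_set_counter(mc_io, 0, token, 0, DPSW_CNT_ING_FRAME, 0);
++ */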
++
++/**
++ * dpsw_if_set_counter() - Set specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: New counter value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t counter);
++
++/**
++ * Maximum number of traffic classes (TCs)
++ */
++#define DPSW_MAX_TC 8
++
++/**
++ * enum dpsw_priority_selector - User priority
++ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
++ * refers to the IEEE 802.1p priority.
++ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
++ * field from IP header
++ *
++ */
++enum dpsw_priority_selector {
++ DPSW_UP_PCP = 0,
++ DPSW_UP_DSCP = 1
++};
++
++/**
++ * enum dpsw_schedule_mode - Traffic classes scheduling
++ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
++ * @DPSW_SCHED_WEIGHTED: weighted schedule based on a token-bucket algorithm
++ */
++enum dpsw_schedule_mode {
++ DPSW_SCHED_STRICT_PRIORITY,
++ DPSW_SCHED_WEIGHTED
++};
++
++/**
++ * struct dpsw_tx_schedule_cfg - traffic class configuration
++ * @mode: Strict or weight-based scheduling
++ * @delta_bandwidth: Weighted bandwidth, in the range 100 to 10000
++ */
++struct dpsw_tx_schedule_cfg {
++ enum dpsw_schedule_mode mode;
++ uint16_t delta_bandwidth;
++};
++
++/**
++ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
++ * class configuration
++ * @priority_selector: Source for user priority regeneration
++ * @tc_id: The traffic class that each incoming user priority is
++ * mapped to on this interface
++ * @tc_sched: Traffic classes configuration
++ */
++struct dpsw_tx_selection_cfg {
++ enum dpsw_priority_selector priority_selector;
++ uint8_t tc_id[DPSW_MAX_PRIORITIES];
++ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
++};
++
++/**
++ * dpsw_if_set_tx_selection() - Map frame priority fields to traffic
++ * classes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Traffic class mapping configuration
++ *
++ * Maps frame priority fields (DSCP or PCP) to a traffic class.
++ * A traffic class is a number in the range 0 to 7
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tx_selection_cfg *cfg);
++
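++/*
++ * Configuration sketch (illustrative only): select PCP as the priority
++ * source, map user priorities to traffic classes one-to-one and use
++ * strict-priority scheduling for every class. 'mc_io', 'token', 'if_id'
++ * and 'err' are assumed to exist in the caller's context.
++ *
++ *	struct dpsw_tx_selection_cfg sel_cfg = { 0 };
++ *	int i;
++ *
++ *	sel_cfg.priority_selector = DPSW_UP_PCP;
++ *	for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
++ *		sel_cfg.tc_id[i] = i;
++ *	for (i = 0; i < DPSW_MAX_TC; i++)
++ *		sel_cfg.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;
++ *	err = dpsw_if_set_tx_selection(mc_io, 0, token, if_id, &sel_cfg);
++ */
++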
++/**
++ * enum dpsw_reflection_filter - Filter type for frames to reflect
++ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
++ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belonging to
++ * the VLAN defined by the vlan_id parameter
++ *
++ */
++enum dpsw_reflection_filter {
++ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
++ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
++};
++
++/**
++ * struct dpsw_reflection_cfg - Structure representing reflection information
++ * @filter: Filter type for frames to reflect
++ * @vlan_id: VLAN ID to reflect; valid only when the filter type is
++ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
++ */
++struct dpsw_reflection_cfg {
++ enum dpsw_reflection_filter filter;
++ uint16_t vlan_id;
++};
++
++/**
++ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
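++/*
++ * Configuration sketch (illustrative only): mirror only ingress frames
++ * of VLAN 100 on the given interface. 'mc_io', 'token', 'if_id' and
++ * 'err' are assumed to exist in the caller's context.
++ *
++ *	struct dpsw_reflection_cfg refl_cfg = {
++ *		.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN,
++ *		.vlan_id = 100,
++ *	};
++ *
++ *	err = dpsw_if_add_reflection(mc_io, 0, token, if_id, &refl_cfg);
++ */
++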
++/**
++ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
++/**
++ * enum dpsw_metering_mode - Metering modes
++ * @DPSW_METERING_MODE_NONE: metering disabled
++ * @DPSW_METERING_MODE_RFC2698: RFC 2698
++ * @DPSW_METERING_MODE_RFC4115: RFC 4115
++ */
++enum dpsw_metering_mode {
++ DPSW_METERING_MODE_NONE = 0,
++ DPSW_METERING_MODE_RFC2698,
++ DPSW_METERING_MODE_RFC4115
++};
++
++/**
++ * enum dpsw_metering_unit - Metering count
++ * @DPSW_METERING_UNIT_BYTES: count bytes
++ * @DPSW_METERING_UNIT_FRAMES: count frames
++ */
++enum dpsw_metering_unit {
++ DPSW_METERING_UNIT_BYTES = 0,
++ DPSW_METERING_UNIT_FRAMES
++};
++
++/**
++ * struct dpsw_metering_cfg - Metering configuration
++ * @mode: metering modes
++ * @units: Bytes or frame units
++ * @cir: Committed information rate (CIR) in Kbits/s
++ * @eir: Peak information rate (PIR) in Kbit/s for RFC 2698;
++ * excess information rate (EIR) in Kbit/s for RFC 4115
++ * @cbs: Committed burst size (CBS) in bytes
++ * @ebs: Peak burst size (PBS) in bytes for RFC 2698;
++ * excess burst size (EBS) in bytes for RFC 4115
++ *
++ */
++struct dpsw_metering_cfg {
++ enum dpsw_metering_mode mode;
++ enum dpsw_metering_unit units;
++ uint32_t cir;
++ uint32_t eir;
++ uint32_t cbs;
++ uint32_t ebs;
++};
++
++/**
++ * dpsw_if_set_flooding_metering() - Set flooding metering
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_metering_cfg *cfg);
++
++/**
++ * dpsw_if_set_metering() - Set interface metering per traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class ID
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ const struct dpsw_metering_cfg *cfg);
++
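++/*
++ * Configuration sketch (illustrative only; all rates and burst sizes are
++ * arbitrary placeholders): RFC 2698 byte-based metering with a 100 Mbit/s
++ * committed rate and a 200 Mbit/s peak rate, applied to flooded traffic.
++ *
++ *	struct dpsw_metering_cfg met_cfg = {
++ *		.mode = DPSW_METERING_MODE_RFC2698,
++ *		.units = DPSW_METERING_UNIT_BYTES,
++ *		.cir = 100000,
++ *		.eir = 200000,
++ *		.cbs = 8192,
++ *		.ebs = 16384,
++ *	};
++ *
++ *	err = dpsw_if_set_flooding_metering(mc_io, 0, token, if_id,
++ *					    &met_cfg);
++ */
++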
++/**
++ * enum dpsw_early_drop_unit - DPSW early drop unit
++ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
++ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
++ */
++enum dpsw_early_drop_unit {
++ DPSW_EARLY_DROP_UNIT_BYTE = 0,
++ DPSW_EARLY_DROP_UNIT_FRAMES
++};
++
++/**
++ * enum dpsw_early_drop_mode - DPSW early drop mode
++ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpsw_early_drop_mode {
++ DPSW_EARLY_DROP_MODE_NONE = 0,
++ DPSW_EARLY_DROP_MODE_TAIL,
++ DPSW_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpsw_wred_cfg - WRED configuration
++ * @min_threshold: Minimum threshold at which packets may be discarded
++ * @max_threshold: Maximum threshold at which packets may be discarded. Above
++ * this threshold all packets are discarded; must be less than
++ * 2^39; approximated as (x+256)*2^(y-1) due to the HW
++ * implementation.
++ * @drop_probability: Probability that a packet will be discarded (1-100,
++ * associated with the maximum threshold)
++ */
++struct dpsw_wred_cfg {
++ uint64_t min_threshold;
++ uint64_t max_threshold;
++ uint8_t drop_probability;
++};
++
++/**
++ * struct dpsw_early_drop_cfg - early-drop configuration
++ * @drop_mode: drop mode
++ * @units: count units
++ * @yellow: WRED - 'yellow' configuration
++ * @green: WRED - 'green' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpsw_early_drop_cfg {
++ enum dpsw_early_drop_mode drop_mode;
++ enum dpsw_early_drop_unit units;
++ struct dpsw_wred_cfg yellow;
++ struct dpsw_wred_cfg green;
++ uint32_t tail_drop_threshold;
++};
++
++/**
++ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for an interface
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpsw_if_set_early_drop()
++ *
++ */
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf);
++
++/**
++ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
++ * configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 64 bytes;
++ * Must be cacheline-aligned and DMA-able memory
++ *
++ * warning: Before calling this function, call dpsw_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
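++/*
++ * Usage sketch (illustrative only; thresholds are placeholders and the
++ * buffer handling is simplified): build a WRED profile, serialize it
++ * with dpsw_prepare_early_drop(), DMA-map the buffer and apply it to
++ * traffic class 0. 'dev', 'mc_io', 'token', 'if_id' and 'err' are
++ * assumed to exist in the caller's context.
++ *
++ *	struct dpsw_early_drop_cfg ed_cfg = { 0 };
++ *	uint8_t buf[256] __aligned(64) = { 0 };
++ *	dma_addr_t iova;
++ *
++ *	ed_cfg.drop_mode = DPSW_EARLY_DROP_MODE_WRED;
++ *	ed_cfg.units = DPSW_EARLY_DROP_UNIT_BYTE;
++ *	ed_cfg.green.min_threshold = 1024;
++ *	ed_cfg.green.max_threshold = 8192;
++ *	ed_cfg.green.drop_probability = 50;
++ *	dpsw_prepare_early_drop(&ed_cfg, buf);
++ *	iova = dma_map_single(dev, buf, sizeof(buf), DMA_TO_DEVICE);
++ *	err = dpsw_if_set_early_drop(mc_io, 0, token, if_id, 0, iova);
++ */
++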
++/**
++ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
++ * @tpid: An additional tag protocol identifier
++ */
++struct dpsw_custom_tpid_cfg {
++ uint16_t tpid;
++};
++
++/**
++ * dpsw_add_custom_tpid() - Configure an additional Ethernet type (TPID) value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Configures a distinct Ethernet type value (or TPID value)
++ * to indicate a VLAN tag in addition to the common
++ * TPID values 0x8100 and 0x88A8.
++ * Two additional TPIDs are supported
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
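++/*
++ * Usage sketch (illustrative only): recognize the legacy Q-in-Q TPID
++ * 0x9100 as a VLAN tag in addition to 0x8100 and 0x88A8. 'mc_io',
++ * 'token' and 'err' are assumed to exist in the caller's context.
++ *
++ *	struct dpsw_custom_tpid_cfg tpid_cfg = { .tpid = 0x9100 };
++ *
++ *	err = dpsw_add_custom_tpid(mc_io, 0, token, &tpid_cfg);
++ */
++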
++/**
++ * dpsw_remove_custom_tpid() - Remove a previously configured custom TPID value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
++/**
++ * dpsw_if_enable() - Enable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * dpsw_if_disable() - Disable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * struct dpsw_if_attr - Structure representing DPSW interface attributes
++ * @num_tcs: Number of traffic classes
++ * @rate: Transmit rate in bits per second
++ * @options: Interface configuration options (bitmap)
++ * @enabled: Indicates if interface is enabled
++ * @accept_all_vlan: The device discards/accepts incoming frames
++ * for VLANs that do not include this interface
++ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
++ * discards untagged frames or priority-tagged frames received on
++ * this interface;
++ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
++ * tagged frames received on this interface are accepted
++ * @qdid: control frames transmit qdid
++ */
++struct dpsw_if_attr {
++ uint8_t num_tcs;
++ uint32_t rate;
++ uint32_t options;
++ int enabled;
++ int accept_all_vlan;
++ enum dpsw_accepted_frames admit_untagged;
++ uint16_t qdid;
++};
++
++/**
++ * dpsw_if_get_attributes() - Obtain attributes of an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @attr: Returned interface attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_if_attr *attr);
++
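++/*
++ * Usage sketch (illustrative only): query an interface and report its
++ * number of traffic classes and transmit rate. 'mc_io', 'token', 'if_id'
++ * and 'err' are assumed to exist in the caller's context.
++ *
++ *	struct dpsw_if_attr if_attr;
++ *
++ *	err = dpsw_if_get_attributes(mc_io, 0, token, if_id, &if_attr);
++ *	if (!err)
++ *		pr_info("if %u: %u TCs, %u bps\n", if_id,
++ *			if_attr.num_tcs, if_attr.rate);
++ */
++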
++/**
++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t frame_length);
++
++/**
++ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Returned maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t *frame_length);
++
++/**
++ * struct dpsw_vlan_cfg - VLAN Configuration
++ * @fdb_id: Forwarding Database (FDB) ID
++ */
++struct dpsw_vlan_cfg {
++ uint16_t fdb_id;
++};
++
++/**
++ * dpsw_vlan_add() - Add a new VLAN to DPSW.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: VLAN configuration
++ *
++ * Only VLAN ID and FDB ID are required parameters here.
++ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
++ * Adding a duplicate VLAN ID is not allowed.
++ * An FDB ID can be shared across multiple VLANs. Shared learning
++ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
++ * with the same fdb_id
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_cfg *cfg);
++
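++/*
++ * Usage sketch (illustrative only): create VLANs 10 and 20 sharing FDB 0
++ * so that addresses learned on one VLAN are also visible on the other.
++ * 'mc_io', 'token' and 'err' are assumed to exist in the caller's context.
++ *
++ *	struct dpsw_vlan_cfg vcfg = { .fdb_id = 0 };
++ *
++ *	err = dpsw_vlan_add(mc_io, 0, token, 10, &vcfg);
++ *	if (!err)
++ *		err = dpsw_vlan_add(mc_io, 0, token, 20, &vcfg);
++ */
++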
++/**
++ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
++ * @num_ifs: The number of interfaces that are assigned to the egress
++ * list for this VLAN
++ * @if_id: The set of interfaces that are
++ * assigned to the egress list for this VLAN
++ */
++struct dpsw_vlan_if_cfg {
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to add
++ *
++ * Only interfaces that do not yet belong to this VLAN can be
++ * added; otherwise an error is generated and the entire command
++ * is ignored. This function can be called repeatedly, each time
++ * providing only the delta of interfaces required.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
++ * transmitted as untagged.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces that should be transmitted as untagged
++ *
++ * These interfaces should already belong to this VLAN.
++ * By default all interfaces are transmitted as tagged.
++ * Providing a non-existing interface, or an interface that is
++ * already configured as untagged, generates an error and the
++ * entire command is ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
++ * included in flooding when a frame with an unknown destination
++ * unicast MAC address arrives.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be used for flooding
++ *
++ * These interfaces should belong to this VLAN. By default all
++ * interfaces are included in the flooding list. Providing a
++ * non-existing interface, or an interface that is already in the
++ * flooding list, generates an error and the entire command is
++ * ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
++ *
++ * Interfaces must belong to this VLAN, otherwise an error
++ * is returned and the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ * converted from untagged to tagged transmission.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces that should be removed
++ *
++ * Interfaces provided to this API have to belong to this VLAN and
++ * be configured as untagged, otherwise an error is returned and
++ * the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
++ * removed from the flooding list.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces used for flooding
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove() - Remove an entire VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * struct dpsw_vlan_attr - VLAN attributes
++ * @fdb_id: Associated FDB ID
++ * @num_ifs: Number of interfaces
++ * @num_untagged_ifs: Number of untagged interfaces
++ * @num_flooding_ifs: Number of flooding interfaces
++ */
++struct dpsw_vlan_attr {
++ uint16_t fdb_id;
++ uint16_t num_ifs;
++ uint16_t num_untagged_ifs;
++ uint16_t num_flooding_ifs;
++};
++
++/**
++ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_attr *attr);
++
++/**
++ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of interfaces belonging to this VLAN
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of flooding interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
++ * untagged
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of untagged interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * struct dpsw_fdb_cfg - FDB Configuration
++ * @num_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ */
++struct dpsw_fdb_cfg {
++ uint16_t num_fdb_entries;
++ uint16_t fdb_aging_time;
++};
++
++/**
++ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the FDB
++ * table for future reference
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Returned Forwarding Database Identifier
++ * @cfg: FDB Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *fdb_id,
++ const struct dpsw_fdb_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove() - Remove FDB from switch
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id);
++
++/**
++ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
++ * @DPSW_FDB_ENTRY_STATIC: Static entry
++ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
++ */
++enum dpsw_fdb_entry_type {
++ DPSW_FDB_ENTRY_STATIC = 0,
++ DPSW_FDB_ENTRY_DINAMIC = 1
++};
++
++/**
++ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @if_egress: Egress interface ID
++ */
++struct dpsw_fdb_unicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ uint8_t mac_addr[6];
++ uint16_t if_egress;
++};
++
++/**
++ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
++
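++/*
++ * Usage sketch (illustrative only; the address and interface are
++ * placeholders): pin a static unicast entry so frames for
++ * 00:11:22:33:44:55 always egress through interface 2.
++ *
++ *	struct dpsw_fdb_unicast_cfg uc_cfg = {
++ *		.type = DPSW_FDB_ENTRY_STATIC,
++ *		.mac_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
++ *		.if_egress = 2,
++ *	};
++ *
++ *	err = dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &uc_cfg);
++ */
++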
++/**
++ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
++ * unicast Ethernet address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Returned unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
++
++/**
++ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @num_ifs: Number of external and internal interfaces
++ * @if_id: Egress interface IDs
++ */
++struct dpsw_fdb_multicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ uint8_t mac_addr[6];
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * If the group doesn't exist, it will be created.
++ * Only interfaces that do not yet belong to this multicast group
++ * can be added; otherwise an error is generated and the command
++ * is ignored.
++ * This function may be called repeatedly, each time providing
++ * only the delta of interfaces required.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
++
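++/*
++ * Usage sketch (illustrative only; the address and interfaces are
++ * placeholders): add interfaces 1 and 2 to the group for multicast
++ * address 01:00:5e:00:00:01; the group is created if it does not exist.
++ *
++ *	struct dpsw_fdb_multicast_cfg mc_cfg = {
++ *		.type = DPSW_FDB_ENTRY_STATIC,
++ *		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
++ *		.num_ifs = 2,
++ *		.if_id = { 1, 2 },
++ *	};
++ *
++ *	err = dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mc_cfg);
++ */
++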
++/**
++ * dpsw_fdb_get_multicast() - Read a multicast group by its multicast Ethernet
++ * address.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Returned multicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
++ * group.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * Interfaces provided to this API have to exist in the group,
++ * otherwise an error will be returned and the entire command
++ * ignored. If no interface is left in the group,
++ * the entire group is deleted
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
++
++/**
++ * enum dpsw_fdb_learning_mode - Auto-learning modes
++ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
++ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
++ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
++ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
++ *
++ * NON-SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. DMAC destination
++ * 2. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Flooding list of interfaces
++ * 2. Control interface
++ * SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Control interface
++ */
++enum dpsw_fdb_learning_mode {
++ DPSW_FDB_LEARNING_MODE_DIS = 0,
++ DPSW_FDB_LEARNING_MODE_HW = 1,
++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
++ DPSW_FDB_LEARNING_MODE_SECURE = 3
++};
++
++/**
++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @mode: learning mode
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ enum dpsw_fdb_learning_mode mode);
++
++/**
++ * struct dpsw_fdb_attr - FDB Attributes
++ * @max_fdb_entries: Maximum number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ * @learning_mode: Learning mode
++ * @num_fdb_mc_groups: Current number of multicast groups
++ * @max_fdb_mc_groups: Maximum number of multicast groups
++ */
++struct dpsw_fdb_attr {
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ enum dpsw_fdb_learning_mode learning_mode;
++ uint16_t num_fdb_mc_groups;
++ uint16_t max_fdb_mc_groups;
++};
++
++/**
++ * dpsw_fdb_get_attributes() - Get FDB attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @attr: Returned FDB attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_attr *attr);
++
++/**
++ * struct dpsw_acl_cfg - ACL Configuration
++ * @max_entries: Maximum number of ACL entries
++ */
++struct dpsw_acl_cfg {
++ uint16_t max_entries;
++};
++
++/**
++ * struct dpsw_acl_fields - ACL fields.
++ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
++ * slow protocols, MVRP, STP
++ * @l2_source_mac: Source MAC address
++ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
++ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
++ * Q-in-Q, IPv4, IPv6, PPPoE
++ * @l2_pcp_dei: VLAN Priority Code Point (PCP) and Drop Eligible Indicator (DEI)
++ * @l2_vlan_id: layer 2 VLAN ID
++ * @l2_ether_type: layer 2 Ethernet type
++ * @l3_dscp: Layer 3 differentiated services code point
++ * @l3_protocol: Indicates to the network layer at the destination host which
++ * protocol this packet carries. The following protocols are
++ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
++ * (encapsulation), GRE, PTP
++ * @l3_source_ip: Source IPv4 IP
++ * @l3_dest_ip: Destination IPv4 IP
++ * @l4_source_port: Source TCP/UDP Port
++ * @l4_dest_port: Destination TCP/UDP Port
++ */
++struct dpsw_acl_fields {
++ uint8_t l2_dest_mac[6];
++ uint8_t l2_source_mac[6];
++ uint16_t l2_tpid;
++ uint8_t l2_pcp_dei;
++ uint16_t l2_vlan_id;
++ uint16_t l2_ether_type;
++ uint8_t l3_dscp;
++ uint8_t l3_protocol;
++ uint32_t l3_source_ip;
++ uint32_t l3_dest_ip;
++ uint16_t l4_source_port;
++ uint16_t l4_dest_port;
++};
++
++/**
++ * struct dpsw_acl_key - ACL key
++ * @match: Match fields
++ * @mask: Mask: b'1 - valid, b'0 - don't care
++ */
++struct dpsw_acl_key {
++ struct dpsw_acl_fields match;
++ struct dpsw_acl_fields mask;
++};
++
++/**
++ * enum dpsw_acl_action
++ * @DPSW_ACL_ACTION_DROP: Drop frame
++ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
++ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
++ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
++ */
++enum dpsw_acl_action {
++ DPSW_ACL_ACTION_DROP,
++ DPSW_ACL_ACTION_REDIRECT,
++ DPSW_ACL_ACTION_ACCEPT,
++ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
++};
++
++/**
++ * struct dpsw_acl_result - ACL action
++ * @action: Action to be taken when an ACL entry is hit
++ * @if_id: Interface ID to redirect the frame to; valid only when a redirect
++ * action is selected
++ */
++struct dpsw_acl_result {
++ enum dpsw_acl_action action;
++ uint16_t if_id;
++};
++
++/**
++ * struct dpsw_acl_entry_cfg - ACL entry
++ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
++ * to dpsw_acl_prepare_entry_cfg()
++ * @result: Required action when entry hit occurs
++ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
++ * change during the lifetime of a policy. It is the user's
++ * responsibility to space the priorities to allow for later
++ * rule additions.
++ */
++struct dpsw_acl_entry_cfg {
++ uint64_t key_iova;
++ struct dpsw_acl_result result;
++ int precedence;
++};
++
++/**
++ * dpsw_acl_add() - Adds ACL to L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: Returned ACL ID, for the future reference
++ * @cfg: ACL configuration
++ *
++ * Create Access Control List. Multiple ACLs can be created and
++ * co-exist in L2 switch
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *acl_id,
++ const struct dpsw_acl_cfg *cfg);
++
++/**
++ * dpsw_acl_remove() - Removes ACL from L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id);
++
++/**
++ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry key for DMA.
++ * @key: key
++ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpsw_acl_add_entry() or
++ * dpsw_acl_remove_entry()
++ *
++ */
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ uint8_t *entry_cfg_buf);
++
++/**
++ * dpsw_acl_add_entry() - Adds an entry to ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
++/**
++ * dpsw_acl_remove_entry() - Removes an entry from ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
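++/*
++ * End-to-end sketch (illustrative only; 'bad_mac' is a hypothetical
++ * 6-byte array and the buffer handling is simplified): create an ACL,
++ * prepare a key that matches one source MAC, and install a DROP entry.
++ *
++ *	struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
++ *	struct dpsw_acl_key key = { 0 };
++ *	struct dpsw_acl_entry_cfg entry = { 0 };
++ *	uint8_t buf[256] = { 0 };
++ *	uint16_t acl_id;
++ *
++ *	memcpy(key.match.l2_source_mac, bad_mac, 6);
++ *	memset(key.mask.l2_source_mac, 0xff, 6);
++ *	err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
++ *	dpsw_acl_prepare_entry_cfg(&key, buf);
++ *	entry.key_iova = dma_map_single(dev, buf, sizeof(buf),
++ *					DMA_TO_DEVICE);
++ *	entry.result.action = DPSW_ACL_ACTION_DROP;
++ *	entry.precedence = 0;
++ *	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
++ */
++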
++/**
++ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
++ * @num_ifs: Number of interfaces
++ * @if_id: List of interfaces
++ */
++struct dpsw_acl_if_cfg {
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
++
++/**
++ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
++
++/**
++ * struct dpsw_acl_attr - ACL Attributes
++ * @max_entries: Max number of ACL entries
++ * @num_entries: Number of used ACL entries
++ * @num_ifs: Number of interfaces associated with ACL
++ */
++struct dpsw_acl_attr {
++ uint16_t max_entries;
++ uint16_t num_entries;
++ uint16_t num_ifs;
++};
++
++/**
++* dpsw_acl_get_attributes() - Get ACL attributes
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @acl_id: ACL Identifier
++* @attr: Returned ACL attributes
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ struct dpsw_acl_attr *attr);
++/**
++* struct dpsw_ctrl_if_attr - Control interface attributes
++* @rx_fqid: Receive FQID
++* @rx_err_fqid: Receive error FQID
++* @tx_err_conf_fqid: Transmit error and confirmation FQID
++*/
++struct dpsw_ctrl_if_attr {
++ uint32_t rx_fqid;
++ uint32_t rx_err_fqid;
++ uint32_t tx_err_conf_fqid;
++};
++
++/**
++* dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @attr: Returned control interface attributes
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_ctrl_if_attr *attr);
++
++/**
++ * Maximum number of DPBP
++ */
++#define DPSW_MAX_DPBP 8
++
++/**
++ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpsw_ctrl_if_pools_cfg {
++ uint8_t num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ uint16_t buffer_size;
++ int backup_pool;
++ } pools[DPSW_MAX_DPBP];
++};
++
++/**
++* dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @cfg: buffer pools configuration
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_ctrl_if_pools_cfg *cfg);
++
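++/*
++ * Configuration sketch (illustrative only; 'dpbp_id' and the buffer size
++ * are placeholders): back the control interface with a single buffer
++ * pool, then enable it.
++ *
++ *	struct dpsw_ctrl_if_pools_cfg pools_cfg = {
++ *		.num_dpbp = 1,
++ *		.pools[0] = {
++ *			.dpbp_id = dpbp_id,
++ *			.buffer_size = 2048,
++ *			.backup_pool = 0,
++ *		},
++ *	};
++ *
++ *	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
++ *	if (!err)
++ *		err = dpsw_ctrl_if_enable(mc_io, 0, token);
++ */
++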
++/**
++* dpsw_ctrl_if_enable() - Enable control interface
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++* dpsw_ctrl_if_disable() - Disable control interface
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++#endif /* __FSL_DPSW_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
+@@ -0,0 +1,1711 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++
++#include <uapi/linux/if_bridge.h>
++#include <net/netlink.h>
++
++#include "../../fsl-mc/include/mc.h"
++#include "dpsw.h"
++#include "dpsw-cmd.h"
++
++/* Minimal supported DPSW version */
++#define DPSW_MIN_VER_MAJOR 7
++#define DPSW_MIN_VER_MINOR 0
++
++/* IRQ index */
++#define DPSW_MAX_IRQ_NUM 2
++
++#define ETHSW_VLAN_MEMBER 1
++#define ETHSW_VLAN_UNTAGGED 2
++#define ETHSW_VLAN_PVID 4
++#define ETHSW_VLAN_GLOBAL 8
++
++struct ethsw_port_priv {
++ struct net_device *netdev;
++ struct list_head list;
++ u16 port_index;
++ struct ethsw_dev_priv *ethsw_priv;
++ u8 stp_state;
++
++ char vlans[VLAN_VID_MASK+1];
++
++};
++
++struct ethsw_dev_priv {
++ struct net_device *netdev;
++ struct fsl_mc_io *mc_io;
++ uint16_t dpsw_handle;
++ struct dpsw_attr sw_attr;
++ int dev_id;
++ /*TODO: redundant, we can use the slave dev list */
++ struct list_head port_list;
++
++ bool flood;
++ bool learning;
++
++ char vlans[VLAN_VID_MASK+1];
++};
++
++static int ethsw_port_stop(struct net_device *netdev);
++static int ethsw_port_open(struct net_device *netdev);
++
++static inline void __get_priv(struct net_device *netdev,
++ struct ethsw_dev_priv **priv,
++ struct ethsw_port_priv **port_priv)
++{
++ struct ethsw_dev_priv *_priv = NULL;
++ struct ethsw_port_priv *_port_priv = NULL;
++
++ if (netdev->flags & IFF_MASTER) {
++ _priv = netdev_priv(netdev);
++ } else {
++ _port_priv = netdev_priv(netdev);
++ _priv = _port_priv->ethsw_priv;
++ }
++
++ if (priv)
++ *priv = _priv;
++ if (port_priv)
++ *port_priv = _port_priv;
++}
++
++/* -------------------------------------------------------------------------- */
++/* ethsw netdevice ops */
++
++static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
++{
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++static int ethsw_open(struct net_device *netdev)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err;
++
++ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ netdev_err(netdev, "dpsw_enable err %d\n", err);
++ return err;
++ }
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ err = dev_open(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ }
++
++ return 0;
++}
++
++static int ethsw_stop(struct net_device *netdev)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err;
++
++ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ netdev_err(netdev, "dpsw_disable err %d\n", err);
++ return err;
++ }
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ err = dev_close(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "dev_close err %d\n", err);
++ }
++
++ return 0;
++}
++
++static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ int err;
++
++ struct dpsw_vlan_cfg vcfg = {
++ /* TODO: add support for VLAN private FDBs */
++ .fdb_id = 0,
++ };
++ if (priv->vlans[vid]) {
++ netdev_err(netdev, "VLAN already configured\n");
++ return -EEXIST;
++ }
++
++ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
++ return err;
++ }
++ priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++
++ return 0;
++}
++
++static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ struct dpsw_vlan_if_cfg vcfg = {
++ .num_ifs = 1,
++ .if_id[0] = port_priv->port_index,
++ };
++
++ if (port_priv->vlans[vid]) {
++ netdev_err(netdev, "VLAN already configured\n");
++ return -EEXIST;
++ }
++
++ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
++ netdev_err(netdev, "interface must be down to change PVID!\n");
++ return -EBUSY;
++ }
++
++ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++
++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
++ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
++ priv->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
++ err);
++ return err;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
++ }
++
++ if (flags & BRIDGE_VLAN_INFO_PVID) {
++ struct dpsw_tci_cfg tci_cfg = {
++ /* TODO: at least add better defaults if these cannot
++ * be configured
++ */
++ .pcp = 0,
++ .dei = 0,
++ .vlan_id = vid,
++ };
++
++ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
++ }
++
++ return 0;
++}
++
++static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
++ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
++ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
++ .len = sizeof(struct bridge_vlan_info), },
++};
++
++static int ethsw_setlink_af_spec(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct bridge_vlan_info *vinfo;
++ struct ethsw_dev_priv *priv = NULL;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
++ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
++ return -EOPNOTSUPP;
++ }
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ if (!port_priv || !priv->vlans[vinfo->vid]) {
++ /* command targets switch device or this is a new VLAN */
++ err = ethsw_add_vlan(priv->netdev, vinfo->vid);
++ if (err)
++ return err;
++
++ /* command targets switch device; mark it*/
++ if (!port_priv)
++ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
++ }
++
++ if (port_priv) {
++ /* command targets switch port */
++ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
++ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
++ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
++ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
++ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
++ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
++ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
++ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
++ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
++};
++
++static int ethsw_set_learning(struct net_device *netdev, u8 flag)
++{
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ enum dpsw_fdb_learning_mode learn_mode;
++ int err;
++
++ if (flag)
++ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
++ else
++ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
++
++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
++ 0, learn_mode);
++ if (err) {
++ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
++ }
++ priv->learning = !!flag;
++
++ return 0;
++}
++
++static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, (int)flag);
++ if (err) {
++		netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err);
++ return err;
++ }
++ priv->flood = !!flag;
++
++ return 0;
++}
++
++static int ethsw_port_set_state(struct net_device *netdev, u8 state)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ u8 old_state = port_priv->stp_state;
++ int err;
++
++ struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = 1,
++ .state = state,
++ };
++ /* TODO: check port state, interface may be down */
++
++ if (state > BR_STATE_BLOCKING)
++ return -EINVAL;
++
++ if (state == port_priv->stp_state)
++ return 0;
++
++ if (state == BR_STATE_DISABLED) {
++ port_priv->stp_state = state;
++
++ err = ethsw_port_stop(netdev);
++ if (err)
++ goto error;
++ } else {
++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
++ port_priv->port_index, &stp_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
++ return err;
++ }
++
++ port_priv->stp_state = state;
++
++ if (old_state == BR_STATE_DISABLED) {
++ err = ethsw_port_open(netdev);
++ if (err)
++ goto error;
++ }
++ }
++
++ return 0;
++error:
++ port_priv->stp_state = old_state;
++ return err;
++}
++
++static int ethsw_setlink_protinfo(struct net_device *netdev,
++ struct nlattr **tb)
++{
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ if (tb[IFLA_BRPORT_LEARNING]) {
++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
++
++ if (port_priv)
++ netdev_warn(netdev,
++ "learning set on whole switch dev\n");
++
++ err = ethsw_set_learning(priv->netdev, flag);
++ if (err)
++ return err;
++
++ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
++ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
++
++ err = ethsw_port_set_flood(port_priv->netdev, flag);
++ if (err)
++ return err;
++
++ } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
++ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
++
++ err = ethsw_port_set_state(port_priv->netdev, state);
++ if (err)
++ return err;
++
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int ethsw_setlink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *attr;
++	/* sized for the larger of the two attribute sets (max index + 1) */
++	struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX ?
++			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
++ int err = 0;
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
++ ifla_br_policy);
++ if (err) {
++ netdev_err(netdev,
++ "nla_parse_nested for br_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_setlink_af_spec(netdev, tb);
++ return err;
++ }
++
++ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
++ if (attr) {
++ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
++ ifla_brport_policy);
++ if (err) {
++ netdev_err(netdev,
++ "nla_parse_nested for brport_policy err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_setlink_protinfo(netdev, tb);
++ return err;
++ }
++
++ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
++ return -EOPNOTSUPP;
++}
++
++static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_dev_priv *priv)
++{
++ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
++ int iflink;
++ int err;
++
++ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
++ if (err)
++ goto nla_put_err;
++ if (netdev->addr_len) {
++ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
++ netdev->dev_addr);
++ if (err)
++ goto nla_put_err;
++ }
++
++ iflink = dev_get_iflink(netdev);
++ if (netdev->ifindex != iflink) {
++ err = nla_put_u32(skb, IFLA_LINK, iflink);
++ if (err)
++ goto nla_put_err;
++ }
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ return err;
++}
++
++static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_port_priv *port_priv)
++{
++ struct nlattr *nest;
++ int err;
++
++ u8 stp_state = port_priv->stp_state;
++
++ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
++ stp_state = BR_STATE_BLOCKING;
++
++ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed\n");
++ return -ENOMEM;
++ }
++
++ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
++ port_priv->ethsw_priv->learning);
++ if (err)
++ goto nla_put_err;
++ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
++ port_priv->ethsw_priv->flood);
++ if (err)
++ goto nla_put_err;
++ nla_nest_end(skb, nest);
++
++ return 0;
++
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
++static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
++ struct ethsw_dev_priv *priv,
++ struct ethsw_port_priv *port_priv)
++{
++ struct nlattr *nest;
++ struct bridge_vlan_info vinfo;
++ const char *vlans;
++ u16 i;
++ int err;
++
++ nest = nla_nest_start(skb, IFLA_AF_SPEC);
++ if (!nest) {
++ netdev_err(netdev, "nla_nest_start failed");
++ return -ENOMEM;
++ }
++
++ if (port_priv)
++ vlans = port_priv->vlans;
++ else
++ vlans = priv->vlans;
++
++ for (i = 0; i < VLAN_VID_MASK+1; i++) {
++ vinfo.flags = 0;
++ vinfo.vid = i;
++
++ if (vlans[i] & ETHSW_VLAN_UNTAGGED)
++ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
++
++ if (vlans[i] & ETHSW_VLAN_PVID)
++ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
++
++ if (vlans[i] & ETHSW_VLAN_MEMBER) {
++ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
++ sizeof(vinfo), &vinfo);
++ if (err)
++ goto nla_put_err;
++ }
++ }
++
++ nla_nest_end(skb, nest);
++
++ return 0;
++nla_put_err:
++ netdev_err(netdev, "nla_put_ err %d\n", err);
++ nla_nest_cancel(skb, nest);
++ return err;
++}
++
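++/* .ndo_bridge_getlink callback: build an AF_BRIDGE RTM_NEWLINK reply,
++ * including per-port bridge state and, when RTEXT_FILTER_BRVLAN is
++ * set in filter_mask, the VLAN table.
++ */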
++static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *netdev, u32 filter_mask,
++ int nlflags)
++{
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ struct ifinfomsg *hdr;
++ struct nlmsghdr *nlh;
++ int err;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
++ if (!nlh)
++ return -EMSGSIZE;
++
++ hdr = nlmsg_data(nlh);
++ memset(hdr, 0, sizeof(*hdr));
++ hdr->ifi_family = AF_BRIDGE;
++ hdr->ifi_type = netdev->type;
++ hdr->ifi_index = netdev->ifindex;
++ hdr->ifi_flags = dev_get_flags(netdev);
++
++ err = __nla_put_netdev(skb, netdev, priv);
++ if (err)
++ goto nla_put_err;
++
++ if (port_priv) {
++ err = __nla_put_port(skb, netdev, port_priv);
++ if (err)
++ goto nla_put_err;
++ }
++
++ /* Check if the VID information is requested */
++ if (filter_mask & RTEXT_FILTER_BRVLAN) {
++ err = __nla_put_vlan(skb, netdev, priv, port_priv);
++ if (err)
++ goto nla_put_err;
++ }
++
++ nlmsg_end(skb, nlh);
++ return skb->len;
++
++nla_put_err:
++ nlmsg_cancel(skb, nlh);
++ return -EMSGSIZE;
++}
++
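++/* Remove a VLAN from the whole switch and clear it from the local
++ * VLAN table of every port.
++ */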
++static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ int err = 0;
++
++ if (!priv->vlans[vid])
++ return -ENOENT;
++
++ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
++ if (err) {
++ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
++ return err;
++ }
++ priv->vlans[vid] = 0;
++
++ list_for_each(pos, &priv->port_list) {
++ ppriv_local = list_entry(pos, struct ethsw_port_priv,
++ list);
++ ppriv_local->vlans[vid] = 0;
++ }
++
++ return 0;
++}
++
++static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
++ struct ethsw_port_priv *port_priv,
++ u16 vid)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ struct dpsw_vlan_if_cfg vcfg = {
++ .num_ifs = 1,
++ .if_id[0] = port_priv->port_index,
++ };
++ unsigned int count = 0;
++ int err = 0;
++
++ if (!port_priv->vlans[vid])
++ return -ENOENT;
++
++ /* VLAN will be deleted from switch if global flag is not set
++ * and is configured on only one port
++ */
++ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
++ list_for_each(pos, &priv->port_list) {
++ ppriv_local = list_entry(pos, struct ethsw_port_priv,
++ list);
++ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
++ count++;
++ }
++
++ if (count == 1)
++ return ethsw_dellink_switch(priv, vid);
++ }
++
++ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] = 0;
++ return 0;
++}
++
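++/* .ndo_bridge_dellink callback: parse the IFLA_BRIDGE_VLAN_INFO
++ * attribute and remove the VLAN from either the switch device or a
++ * single port.
++ */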
++static int ethsw_dellink(struct net_device *netdev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++{
++ struct nlattr *tb[IFLA_BRIDGE_MAX+1];
++ struct nlattr *spec;
++ struct bridge_vlan_info *vinfo;
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv = NULL;
++ int err = 0;
++
++ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (!spec)
++ return 0;
++
++ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ if (err)
++ return err;
++
++ if (!tb[IFLA_BRIDGE_VLAN_INFO])
++ return -EOPNOTSUPP;
++
++ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++
++ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
++ return -EINVAL;
++
++ __get_priv(netdev, &priv, &port_priv);
++
++ /* decide if command targets switch device or port */
++ if (!port_priv)
++ err = ethsw_dellink_switch(priv, vinfo->vid);
++ else
++ err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
++
++ return err;
++}
++
++static const struct net_device_ops ethsw_ops = {
++ .ndo_open = &ethsw_open,
++ .ndo_stop = &ethsw_stop,
++
++ .ndo_bridge_setlink = &ethsw_setlink,
++ .ndo_bridge_getlink = &ethsw_getlink,
++ .ndo_bridge_dellink = &ethsw_dellink,
++
++ .ndo_start_xmit = &ethsw_dropframe,
++};
++
++/* -------------------------------------------------------------------------- */
++/* switch port netdevice ops */
++
++static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state;
++ int err;
++
++ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index, &state);
++ if (unlikely(err)) {
++ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
++ return err;
++ }
++
++ WARN_ONCE(state.up > 1, "Garbage read into link_state");
++
++ if (state.up)
++ netif_carrier_on(port_priv->netdev);
++ else
++ netif_carrier_off(port_priv->netdev);
++
++ return 0;
++}
++
++static int ethsw_port_open(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ if (!netif_oper_up(netdev) ||
++ port_priv->stp_state == BR_STATE_DISABLED)
++ return 0;
++
++ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
++static int ethsw_port_stop(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
++ return err;
++ }
++
++ return 0;
++}
++
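++/* FDB helpers: program static unicast/multicast entries into the
++ * DPSW forwarding database through the MC firmware.
++ */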
++static int ethsw_port_fdb_add_uc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
++
++ entry.if_egress = port_priv->port_index;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_uc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
++
++ entry.if_egress = port_priv->port_index;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_add_mc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->port_index;
++
++ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_mc(struct net_device *netdev,
++ const unsigned char *addr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->port_index;
++
++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
++ return err;
++}
++
++static int _lookup_address(struct net_device *netdev, int is_uc,
++ const unsigned char *addr)
++{
++ struct netdev_hw_addr *ha;
++ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
++
++ netif_addr_lock_bh(netdev);
++ list_for_each_entry(ha, &list->list, list) {
++ if (ether_addr_equal(ha->addr, addr)) {
++ netif_addr_unlock_bh(netdev);
++ return 1;
++ }
++ }
++ netif_addr_unlock_bh(netdev);
++ return 0;
++}
++
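++/* .ndo_fdb_add callback: honor the NLM_F_EXCL/NLM_F_APPEND duplicate
++ * checks, program the DPSW FDB and mirror the address into the port
++ * netdevice's uc/mc address lists.
++ */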
++static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid,
++ u16 flags)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ int err;
++
++ /* TODO: add replace support when added to iproute bridge */
++ if (!(flags & NLM_F_REQUEST)) {
++ netdev_err(netdev,
++ "ethsw_port_fdb_add unexpected flags value %08x\n",
++ flags);
++ return -EINVAL;
++ }
++
++ if (is_unicast_ether_addr(addr)) {
++ /* if entry cannot be replaced, return error if exists */
++ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos,
++ struct ethsw_port_priv,
++ list);
++ if (_lookup_address(port_priv->netdev,
++ 1, addr))
++ return -EEXIST;
++ }
++ }
++
++ err = ethsw_port_fdb_add_uc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
++ err);
++ return err;
++ }
++
++ /* we might have replaced an existing entry for a different
++ * switch port, make sure the address doesn't linger in any
++ * port address list
++ */
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv,
++ list);
++ dev_uc_del(port_priv->netdev, addr);
++ }
++
++ err = dev_uc_add(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_uc_add err %d\n", err);
++ return err;
++ }
++ } else {
++ struct dpsw_fdb_multicast_cfg entry = {
++ .type = DPSW_FDB_ENTRY_STATIC,
++ .num_ifs = 0,
++ };
++
++ /* check if address is already set on this port */
++ if (_lookup_address(netdev, 0, addr))
++ return -EEXIST;
++
++ /* check if the address exists on other port */
++ ether_addr_copy(entry.mac_addr, addr);
++ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
++ 0, &entry);
++ if (!err) {
++ /* entry exists, can we replace it? */
++ if (flags & NLM_F_EXCL)
++ return -EEXIST;
++ } else if (err != -ENAVAIL) {
++ netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
++ err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_add_mc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
++ err);
++ return err;
++ }
++
++ err = dev_mc_add(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_mc_add err %d\n", err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *netdev,
++ const unsigned char *addr, u16 vid)
++{
++ int err;
++
++ if (is_unicast_ether_addr(addr)) {
++ err = ethsw_port_fdb_del_uc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
++ err);
++ return err;
++ }
++
++ /* also delete if configured on port */
++ err = dev_uc_del(netdev, addr);
++ if (err && err != -ENOENT) {
++ netdev_err(netdev, "dev_uc_del err %d\n", err);
++ return err;
++ }
++ } else {
++ if (!_lookup_address(netdev, 0, addr))
++ return -ENOENT;
++
++ err = dev_mc_del(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "dev_mc_del err %d\n", err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_del_mc(netdev, addr);
++ if (err) {
++ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
++ err);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++static struct rtnl_link_stats64 *
++ethsw_port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FRAME, &storage->rx_packets);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_FRAME, &storage->tx_packets);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_BYTE, &storage->rx_bytes);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FRAME_DISCARD,
++ &storage->rx_dropped);
++ if (err)
++ goto error;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_ING_FLTR_FRAME,
++ &tmp);
++ if (err)
++ goto error;
++ storage->rx_dropped += tmp;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ DPSW_CNT_EGR_FRAME_DISCARD,
++ &storage->tx_dropped);
++ if (err)
++ goto error;
++
++ return storage;
++
++error:
++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
++ return storage;
++}
++
++static const struct net_device_ops ethsw_port_ops = {
++ .ndo_open = &ethsw_port_open,
++ .ndo_stop = &ethsw_port_stop,
++
++ .ndo_fdb_add = &ethsw_port_fdb_add,
++ .ndo_fdb_del = &ethsw_port_fdb_del,
++ .ndo_fdb_dump = &ndo_dflt_fdb_dump,
++
++ .ndo_get_stats64 = &ethsw_port_get_stats,
++
++ .ndo_start_xmit = &ethsw_dropframe,
++};
++
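++/* Per-port ethtool statistics, backed by DPSW hardware counters. */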
++static struct {
++ enum dpsw_counter id;
++ char name[ETH_GSTRING_LEN];
++} ethsw_ethtool_counters[] = {
++ {DPSW_CNT_ING_FRAME, "rx frames"},
++ {DPSW_CNT_ING_BYTE, "rx bytes"},
++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPSW_CNT_EGR_FRAME, "tx frames"},
++ {DPSW_CNT_EGR_BYTE, "tx bytes"},
++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++};
++
++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(ethsw_ethtool_counters);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void ethsw_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
++{
++ int i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
++
++static void ethsw_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int i;
++ int err;
++
++ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
++ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
++ port_priv->ethsw_priv->dpsw_handle,
++ port_priv->port_index,
++ ethsw_ethtool_counters[i].id,
++ &data[i]);
++ if (err)
++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
++ ethsw_ethtool_counters[i].name, err);
++ }
++}
++
++static const struct ethtool_ops ethsw_port_ethtool_ops = {
++ .get_strings = &ethsw_ethtool_get_strings,
++ .get_ethtool_stats = &ethsw_ethtool_get_stats,
++ .get_sset_count = &ethsw_ethtool_get_sset_count,
++};
++
++/* -------------------------------------------------------------------------- */
++/* ethsw driver functions */
++
++static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
++{
++ struct list_head *pos;
++ struct ethsw_port_priv *port_priv;
++ int err;
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv,
++ list);
++
++ err = _ethsw_port_carrier_state_sync(port_priv->netdev);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "_ethsw_port_carrier_state_sync err %d\n",
++ err);
++ }
++
++ return 0;
++}
++
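++/* Interrupt handling: the hard handler only wakes the thread; the
++ * threaded handler reads and clears the DPSW IRQ status and, on
++ * DPSW_IRQ_EVENT_LINK_CHANGED, resyncs the carrier state of all ports.
++ */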
++static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *sw_dev = to_fsl_mc_device(dev);
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++
++ struct fsl_mc_io *io = priv->mc_io;
++ uint16_t token = priv->dpsw_handle;
++ int irq_index = DPSW_IRQ_INDEX_IF;
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ uint32_t status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
++ int err;
++
++ /* Sanity check */
++ if (WARN_ON(!sw_dev || !sw_dev->irqs || !sw_dev->irqs[irq_index]))
++ goto out;
++ if (WARN_ON(sw_dev->irqs[irq_index]->msi_desc->irq != irq_num))
++ goto out;
++
++ err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
++ if (unlikely(err)) {
++ netdev_err(netdev, "Can't get irq status (err %d)", err);
++
++ err = dpsw_clear_irq_status(io, 0, token, irq_index,
++ 0xFFFFFFFF);
++ if (unlikely(err))
++ netdev_err(netdev, "Can't clear irq status (err %d)",
++ err);
++ goto out;
++ }
++
++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
++ err = ethsw_links_state_update(priv);
++ if (unlikely(err))
++ goto out;
++ }
++
++out:
++ return IRQ_HANDLED;
++}
++
++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
++ const int irq_index = DPSW_IRQ_INDEX_IF;
++ uint32_t mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++
++ err = fsl_mc_allocate_irqs(sw_dev);
++ if (unlikely(err)) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
++ }
++
++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
++
++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, 0);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
++
++ irq = sw_dev->irqs[irq_index];
++
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ ethsw_irq0_handler,
++ _ethsw_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (unlikely(err)) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
++
++ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, mask);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
++
++ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ irq_index, 1);
++ if (unlikely(err)) {
++ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
++ goto free_devm_irq;
++ }
++
++ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(sw_dev);
++ return err;
++}
++
++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++
++ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ devm_free_irq(dev,
++ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
++ dev);
++ fsl_mc_free_irqs(sw_dev);
++}
++
++static int __cold
++ethsw_init(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_dev_priv *priv;
++ struct net_device *netdev;
++ int err = 0;
++ u16 i;
++ const struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = 1,
++ .state = DPSW_STP_STATE_FORWARDING,
++ };
++
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ priv->dev_id = sw_dev->obj_desc.id;
++
++ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
++ if (err) {
++ dev_err(dev, "dpsw_open err %d\n", err);
++ goto err_exit;
++ }
++ if (!priv->dpsw_handle) {
++ dev_err(dev, "dpsw_open returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_exit;
++ }
++
++ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
++ &priv->sw_attr);
++ if (err) {
++ dev_err(dev, "dpsw_get_attributes err %d\n", err);
++ goto err_close;
++ }
++
++ /* Minimum supported DPSW version check */
++ if (priv->sw_attr.version.major < DPSW_MIN_VER_MAJOR ||
++ (priv->sw_attr.version.major == DPSW_MIN_VER_MAJOR &&
++ priv->sw_attr.version.minor < DPSW_MIN_VER_MINOR)) {
++ dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
++ priv->sw_attr.version.major,
++ priv->sw_attr.version.minor,
++ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
++ err = -ENOTSUPP;
++ goto err_close;
++ }
++
++ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
++ if (err) {
++ dev_err(dev, "dpsw_reset err %d\n", err);
++ goto err_close;
++ }
++
++ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
++ DPSW_FDB_LEARNING_MODE_HW);
++ if (err) {
++ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ goto err_close;
++ }
++
++ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
++ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
++ &stp_cfg);
++ if (err) {
++ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
++ err, i);
++ goto err_close;
++ }
++
++ err = dpsw_if_set_broadcast(priv->mc_io, 0,
++ priv->dpsw_handle, i, 1);
++ if (err) {
++ dev_err(dev,
++ "dpsw_if_set_broadcast err %d for port %d\n",
++ err, i);
++ goto err_close;
++ }
++ }
++
++ return 0;
++
++err_close:
++ dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++err_exit:
++ return err;
++}
++
++static int __cold
++ethsw_takedown(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct net_device *netdev;
++ struct ethsw_dev_priv *priv;
++ int err;
++
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++ if (err)
++ dev_warn(dev, "dpsw_close err %d\n", err);
++
++ return 0;
++}
++
++static int __cold
++ethsw_remove(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev;
++ struct net_device *netdev;
++ struct ethsw_dev_priv *priv;
++ struct ethsw_port_priv *port_priv;
++ struct list_head *pos;
++
++ dev = &sw_dev->dev;
++ netdev = dev_get_drvdata(dev);
++ priv = netdev_priv(netdev);
++
++ list_for_each(pos, &priv->port_list) {
++ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++
++ rtnl_lock();
++ netdev_upper_dev_unlink(port_priv->netdev, netdev);
++ rtnl_unlock();
++
++ unregister_netdev(port_priv->netdev);
++ free_netdev(port_priv->netdev);
++ }
++
++ ethsw_teardown_irqs(sw_dev);
++
++ unregister_netdev(netdev);
++
++ ethsw_takedown(sw_dev);
++ fsl_mc_portal_free(priv->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return 0;
++}
++
++static int __cold
++ethsw_probe(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev;
++ struct net_device *netdev = NULL;
++ struct ethsw_dev_priv *priv = NULL;
++ int err = 0;
++ u16 i;
++ const char def_mcast[ETH_ALEN] = {
++ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
++ };
++ char port_name[IFNAMSIZ];
++
++ dev = &sw_dev->dev;
++
++ /* register switch device, it's for management only - no I/O */
++ netdev = alloc_etherdev(sizeof(*priv));
++ if (!netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ return -ENOMEM;
++ }
++ netdev->netdev_ops = &ethsw_ops;
++
++ SET_NETDEV_DEV(netdev, dev);
++ dev_set_drvdata(dev, netdev);
++
++ priv = netdev_priv(netdev);
++ priv->netdev = netdev;
++
++ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
++ if (err) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_netdev;
++ }
++ if (!priv->mc_io) {
++ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
++ err = -EFAULT;
++ goto err_free_netdev;
++ }
++
++ err = ethsw_init(sw_dev);
++ if (err) {
++ dev_err(dev, "switch init err %d\n", err);
++ goto err_free_cmdport;
++ }
++
++ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
++
++ /* TODO: should we hold rtnl_lock here? We can't register_netdev under
++ * lock
++ */
++ dev_alloc_name(netdev, "sw%d");
++ err = register_netdev(netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ goto err_takedown;
++ }
++ if (err)
++ dev_info(dev, "register_netdev res %d\n", err);
++
++ /* VLAN 1 is implicitly configured on the switch */
++ priv->vlans[1] = ETHSW_VLAN_MEMBER;
++ /* Flooding, learning are implicitly enabled */
++ priv->learning = true;
++ priv->flood = true;
++
++ /* register switch ports */
++ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
++
++ INIT_LIST_HEAD(&priv->port_list);
++ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
++ struct net_device *port_netdev;
++ struct ethsw_port_priv *port_priv;
++
++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
++ if (!port_netdev) {
++ dev_err(dev, "alloc_etherdev error\n");
++ goto err_takedown;
++ }
++
++ port_priv = netdev_priv(port_netdev);
++ port_priv->netdev = port_netdev;
++ port_priv->ethsw_priv = priv;
++
++ port_priv->port_index = i;
++ port_priv->stp_state = BR_STATE_FORWARDING;
++ /* VLAN 1 is configured by default on all switch ports */
++ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
++ ETHSW_VLAN_PVID;
++
++ SET_NETDEV_DEV(port_netdev, dev);
++ port_netdev->netdev_ops = &ethsw_port_ops;
++ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
++
++ port_netdev->flags = port_netdev->flags |
++ IFF_PROMISC | IFF_SLAVE;
++
++ dev_alloc_name(port_netdev, port_name);
++ err = register_netdev(port_netdev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev error %d\n", err);
++ free_netdev(port_netdev);
++ goto err_takedown;
++ }
++
++ rtnl_lock();
++
++ err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL);
++ if (err) {
++ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
++ err);
++ unregister_netdev(port_netdev);
++ free_netdev(port_netdev);
++ rtnl_unlock();
++ goto err_takedown;
++ }
++
++ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
++
++ rtnl_unlock();
++
++ list_add(&port_priv->list, &priv->port_list);
++
++ /* TODO: implement set_rm_mode instead of this */
++ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
++ if (err)
++ dev_warn(&netdev->dev,
++ "ethsw_port_fdb_add_mc err %d\n", err);
++
++ /* sync carrier state */
++ err = _ethsw_port_carrier_state_sync(port_netdev);
++ if (err)
++ netdev_err(netdev,
++ "_ethsw_port_carrier_state_sync err %d\n",
++ err);
++ }
++
++ /* the switch starts up enabled */
++ rtnl_lock();
++ err = dev_open(netdev);
++ rtnl_unlock();
++ if (err)
++ dev_warn(dev, "dev_open err %d\n", err);
++
++ /* setup irqs */
++ err = ethsw_setup_irqs(sw_dev);
++ if (unlikely(err)) {
++ dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
++ goto err_takedown;
++ }
++
++ dev_info(&netdev->dev,
++ "probed %d port switch\n", priv->sw_attr.num_ifs);
++ return 0;
++
++err_takedown:
++ ethsw_remove(sw_dev);
++ /* ethsw_remove() already freed the portal and netdev */
++ return err;
++err_free_cmdport:
++ fsl_mc_portal_free(priv->mc_io);
++err_free_netdev:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(netdev);
++
++ return err;
++}
++
++static const struct fsl_mc_device_match_id ethsw_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpsw",
++ .ver_major = DPSW_VER_MAJOR,
++ .ver_minor = DPSW_VER_MINOR,
++ },
++ {}
++};
++
++static struct fsl_mc_driver eth_sw_drv = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = ethsw_probe,
++ .remove = ethsw_remove,
++ .match_id_table = ethsw_match_id_table,
++};
++
++module_fsl_mc_driver(eth_sw_drv);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
+--- a/drivers/staging/fsl-mc/include/net.h
++++ b/drivers/staging/fsl-mc/include/net.h
+@@ -367,7 +367,6 @@
+ /*************************** GTP fields ************************************/
+ #define NH_FLD_GTP_TEID (1)
+
+-
+ /* Protocol options */
+
+ /* Ethernet options */
diff --git a/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch b/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch
new file mode 100644
index 0000000..eac1340
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7221-dpaa2-ethsw-match-id-cleanup.patch
@@ -0,0 +1,26 @@
+From 535826c8b725f752e5da17ea576d6d96e7d53f13 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Fri, 15 Jul 2016 13:13:41 -0500
+Subject: [PATCH 221/226] dpaa2-ethsw: match id cleanup
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethsw/switch.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethsw/switch.c
++++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
+@@ -1685,12 +1685,10 @@ err_free_netdev:
+ return err;
+ }
+
+-static const struct fsl_mc_device_match_id ethsw_match_id_table[] = {
++static const struct fsl_mc_device_id ethsw_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+- .ver_major = DPSW_VER_MAJOR,
+- .ver_minor = DPSW_VER_MINOR,
+ },
+ {}
+ };
diff --git a/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch b/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch
new file mode 100644
index 0000000..1649921
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7222-dpaa2-ethsw-fix-compile-error-on-backport-to-4.4.patch
@@ -0,0 +1,21 @@
+From c51ed10a001d3fd5b80b7bb92f2d5182f1d9fa5a Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Thu, 25 Aug 2016 16:10:12 -0500
+Subject: [PATCH 222/226] dpaa2-ethsw: fix compile error on backport to 4.4
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/ethsw/switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-dpaa2/ethsw/switch.c
++++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
+@@ -1625,7 +1625,7 @@ ethsw_probe(struct fsl_mc_device *sw_dev
+
+ rtnl_lock();
+
+- err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL);
++ err = netdev_master_upper_dev_link(port_netdev, netdev);
+ if (err) {
+ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
+ err);
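For context: in kernel 4.4 this helper takes only the two netdevice
arguments, roughly int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev); the upper_priv/upper_info parameters that
the driver originally passed as NULL were only introduced in later
kernels, which is why the backport drops them.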
diff --git a/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch b/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch
new file mode 100644
index 0000000..1d719e0
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7223-irqdomain-Added-domain-bus-token-DOMAIN_BUS_FSL_MC_M.patch
@@ -0,0 +1,26 @@
+From b565bd9a6011819ff66bd4fa0a50f7e54dff2753 Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:19 -0600
+Subject: [PATCH 223/226] irqdomain: Added domain bus token
+ DOMAIN_BUS_FSL_MC_MSI
+
+Since an FSL-MC bus is a new bus type that is neither PCI nor
+PLATFORM, we need a new domain bus token to disambiguate the
+IRQ domain for FSL-MC MSIs.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ include/linux/irqdomain.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -73,6 +73,7 @@ enum irq_domain_bus_token {
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
++ DOMAIN_BUS_FSL_MC_MSI,
+ };
+
+ /**
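As a rough usage sketch (not part of the patch; "fwnode" below stands for
whatever firmware node handle the bus code owns), the new token lets the
FSL-MC bus look up its own MSI irq domain without colliding with the PCI
or platform MSI domains:

	/* sketch: locate the irq domain registered under the FSL-MC token */
	struct irq_domain *msi_domain;

	msi_domain = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_FSL_MC_MSI);
	if (!msi_domain)
		return -ENODEV;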
diff --git a/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch b/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch
new file mode 100644
index 0000000..8dd11f4
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7224-fsl-mc-msi-Added-FSL-MC-specific-member-to-the-msi_d.patch
@@ -0,0 +1,40 @@
+From 359c7977e003781024154da61e55181b92b12bdf Mon Sep 17 00:00:00 2001
+From: "J. German Rivera" <German.Rivera at freescale.com>
+Date: Wed, 6 Jan 2016 16:03:20 -0600
+Subject: [PATCH 224/226] fsl-mc: msi: Added FSL-MC-specific member to the
+ msi_desc's union
+
+FSL-MC is a bus type different from PCI and platform, so it needs
+its own member in the msi_desc's union.
+
+Signed-off-by: J. German Rivera <German.Rivera at freescale.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
+---
+ include/linux/msi.h | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -33,6 +33,14 @@ struct platform_msi_desc {
+ };
+
+ /**
++ * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
++ * @msi_index: The index of the MSI descriptor
++ */
++struct fsl_mc_msi_desc {
++ u16 msi_index;
++};
++
++/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list: List head for management
+ * @irq: The base interrupt number
+@@ -87,6 +95,7 @@ struct msi_desc {
+ * tree wide cleanup.
+ */
+ struct platform_msi_desc platform;
++ struct fsl_mc_msi_desc fsl_mc;
+ };
+ };
+
diff --git a/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch b/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch
new file mode 100644
index 0000000..2815d46
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/7225-dpaa2-evb-fix-4.4-backport-compile-error.patch
@@ -0,0 +1,21 @@
+From dbdf9b1fe83f88090d88bce980885df4fac46162 Mon Sep 17 00:00:00 2001
+From: Stuart Yoder <stuart.yoder at nxp.com>
+Date: Thu, 25 Aug 2016 11:17:52 -0500
+Subject: [PATCH 225/226] dpaa2-evb: fix 4.4 backport compile error
+
+Signed-off-by: Stuart Yoder <stuart.yoder at nxp.com>
+---
+ drivers/staging/fsl-dpaa2/evb/evb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/fsl-dpaa2/evb/evb.c
++++ b/drivers/staging/fsl-dpaa2/evb/evb.c
+@@ -1153,7 +1153,7 @@ static int evb_probe(struct fsl_mc_devic
+ }
+
+ rtnl_lock();
+- err = netdev_master_upper_dev_link(port_netdev, netdev, NULL, NULL);
++ err = netdev_master_upper_dev_link(port_netdev, netdev);
+ if (unlikely(err)) {
+ dev_err(dev, "netdev_master_upper_dev_link err %d\n",
+ err);
diff --git a/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch b/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch
new file mode 100644
index 0000000..cf0cecf
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/8136-drivers-mmc-Add-compatible-string-for-LS1088A.patch
@@ -0,0 +1,24 @@
+From f6f7c6ecdecfb75412a17205d9ac4905f6bc2851 Mon Sep 17 00:00:00 2001
+From: Rai Harninder <harninder.rai at nxp.com>
+Date: Thu, 18 Feb 2016 16:35:35 +0530
+Subject: [PATCH 136/141] drivers/mmc: Add compatible string for LS1088A
+
+Signed-off-by: Rai Harninder <harninder.rai at nxp.com>
+Signed-off-by: Pratiyush Mohan Srivastava <pratiyush.srivastava at nxp.com>
+Signed-off-by: Raghav Dogra <raghav.dogra at nxp.com>
+---
+ drivers/mmc/host/sdhci-pltfm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-pltfm.c
++++ b/drivers/mmc/host/sdhci-pltfm.c
+@@ -93,6 +93,9 @@ void sdhci_get_of_property(struct platfo
+ if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
++ if (of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
++ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
++
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,t4240-esdhc") ||
diff --git a/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch b/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch
new file mode 100644
index 0000000..d6137a8
--- /dev/null
+++ b/target/linux/layerscape/patches-4.4/8137-armv8-ls1088a-Add-PCIe-compatible.patch
@@ -0,0 +1,38 @@
+From 1aeb63c52ade1219032161fcdb923aa4c62b3796 Mon Sep 17 00:00:00 2001
+From: Prabhakar Kushwaha <prabhakar.kushwaha at nxp.com>
+Date: Sun, 9 Oct 2016 14:52:49 +0800
+Subject: [PATCH 137/141] armv8: ls1088a: Add PCIe compatible
+
+commit: 1a089a382b187c80390f022d1e3f3749b2adcc64
+[don't apply dtsi]
+
+Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha at nxp.com>
+Integrated-by: Zhao Qiang <qiang.zhao at nxp.com>
+---
+ drivers/pci/host/pci-layerscape.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/pci/host/pci-layerscape.c
++++ b/drivers/pci/host/pci-layerscape.c
+@@ -215,6 +215,13 @@ static struct ls_pcie_drvdata ls1046_drv
+ .ops = &ls_pcie_host_ops,
+ };
+
++static struct ls_pcie_drvdata ls1088_drvdata = {
++ .lut_offset = 0x80000,
++ .ltssm_shift = 0,
++ .lut_dbg = 0x407fc,
++ .ops = &ls_pcie_host_ops,
++};
++
+ static struct ls_pcie_drvdata ls2080_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
+@@ -227,6 +234,7 @@ static const struct of_device_id ls_pcie
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
++ { .compatible = "fsl,ls1088a-pcie", .data = &ls1088_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+ { },
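For reference, a sketch of the usual pattern (variable names here are
assumptions, not code from this patch): the probe path matches the device
node against this table and picks up the per-SoC drvdata through the
.data pointer:

	/* sketch: retrieve the matched entry's drvdata in probe() */
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return -ENODEV;
	pcie->drvdata = match->data; /* e.g. &ls1088_drvdata on LS1088A */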